aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2008-07-18 16:00:54 -0400
committerIngo Molnar <mingo@elte.hu>2008-07-18 16:00:54 -0400
commitbb2c018b09b681d43f5e08124b83e362647ea82b (patch)
treed794902c78f9fdd04ed88a4b8d451ed6f9292ec0 /drivers
parent82638844d9a8581bbf33201cc209a14876eca167 (diff)
parent5b664cb235e97afbf34db9c4d77f08ebd725335e (diff)
Merge branch 'linus' into cpus4096
Conflicts: drivers/acpi/processor_throttling.c Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig9
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/bay.c3
-rw-r--r--drivers/acpi/bus.c24
-rw-r--r--drivers/acpi/dispatcher/dsinit.c2
-rw-r--r--drivers/acpi/dispatcher/dsmethod.c1
-rw-r--r--drivers/acpi/dispatcher/dsopcode.c22
-rw-r--r--drivers/acpi/dispatcher/dswexec.c8
-rw-r--r--drivers/acpi/dispatcher/dswstate.c20
-rw-r--r--drivers/acpi/dock.c3
-rw-r--r--drivers/acpi/events/evevent.c6
-rw-r--r--drivers/acpi/events/evgpe.c36
-rw-r--r--drivers/acpi/events/evgpeblk.c18
-rw-r--r--drivers/acpi/events/evmisc.c4
-rw-r--r--drivers/acpi/events/evregion.c5
-rw-r--r--drivers/acpi/events/evrgnini.c2
-rw-r--r--drivers/acpi/events/evxfevnt.c18
-rw-r--r--drivers/acpi/executer/exconfig.c17
-rw-r--r--drivers/acpi/executer/exconvrt.c12
-rw-r--r--drivers/acpi/executer/excreate.c2
-rw-r--r--drivers/acpi/executer/exdump.c67
-rw-r--r--drivers/acpi/executer/exfldio.c9
-rw-r--r--drivers/acpi/executer/exmisc.c8
-rw-r--r--drivers/acpi/executer/exprep.c8
-rw-r--r--drivers/acpi/executer/exregion.c2
-rw-r--r--drivers/acpi/executer/exresop.c4
-rw-r--r--drivers/acpi/executer/exstore.c6
-rw-r--r--drivers/acpi/fan.c8
-rw-r--r--drivers/acpi/glue.c5
-rw-r--r--drivers/acpi/hardware/hwgpe.c52
-rw-r--r--drivers/acpi/namespace/nsdump.c6
-rw-r--r--drivers/acpi/namespace/nseval.c35
-rw-r--r--drivers/acpi/namespace/nsinit.c1
-rw-r--r--drivers/acpi/namespace/nsload.c3
-rw-r--r--drivers/acpi/namespace/nsparse.c15
-rw-r--r--drivers/acpi/namespace/nsutils.c50
-rw-r--r--drivers/acpi/namespace/nsxfeval.c3
-rw-r--r--drivers/acpi/numa.c4
-rw-r--r--drivers/acpi/parser/psargs.c4
-rw-r--r--drivers/acpi/parser/psxface.c4
-rw-r--r--drivers/acpi/pci_irq.c38
-rw-r--r--drivers/acpi/pci_slot.c368
-rw-r--r--drivers/acpi/power.c138
-rw-r--r--drivers/acpi/processor_core.c75
-rw-r--r--drivers/acpi/processor_idle.c34
-rw-r--r--drivers/acpi/processor_perflib.c18
-rw-r--r--drivers/acpi/processor_throttling.c38
-rw-r--r--drivers/acpi/reboot.c50
-rw-r--r--drivers/acpi/resources/rscalc.c4
-rw-r--r--drivers/acpi/resources/rscreate.c41
-rw-r--r--drivers/acpi/resources/rsmisc.c2
-rw-r--r--drivers/acpi/resources/rsutils.c13
-rw-r--r--drivers/acpi/scan.c104
-rw-r--r--drivers/acpi/sleep/main.c328
-rw-r--r--drivers/acpi/sleep/wakeup.c13
-rw-r--r--drivers/acpi/system.c169
-rw-r--r--drivers/acpi/tables/tbfadt.c23
-rw-r--r--drivers/acpi/tables/tbfind.c5
-rw-r--r--drivers/acpi/tables/tbinstal.c30
-rw-r--r--drivers/acpi/tables/tbutils.c15
-rw-r--r--drivers/acpi/tables/tbxface.c28
-rw-r--r--drivers/acpi/tables/tbxfroot.c4
-rw-r--r--drivers/acpi/utilities/utalloc.c5
-rw-r--r--drivers/acpi/utilities/utcopy.c4
-rw-r--r--drivers/acpi/utilities/utdebug.c54
-rw-r--r--drivers/acpi/utilities/utdelete.c2
-rw-r--r--drivers/acpi/utilities/uteval.c5
-rw-r--r--drivers/acpi/utilities/utmisc.c39
-rw-r--r--drivers/acpi/utilities/utmutex.c4
-rw-r--r--drivers/acpi/utilities/utobject.c9
-rw-r--r--drivers/acpi/video.c123
-rw-r--r--drivers/ata/pata_mpc52xx.c2
-rw-r--r--drivers/base/platform.c296
-rw-r--r--drivers/base/power/main.c675
-rw-r--r--drivers/base/power/power.h2
-rw-r--r--drivers/base/power/sysfs.c3
-rw-r--r--drivers/base/power/trace.c4
-rw-r--r--drivers/block/Kconfig12
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/hd.c (renamed from drivers/ide/legacy/hd.c)3
-rw-r--r--drivers/char/Kconfig8
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/apm-emulation.c346
-rw-r--r--drivers/char/bsr.c312
-rw-r--r--drivers/char/hvc_console.c8
-rw-r--r--drivers/char/hvc_console.h10
-rw-r--r--drivers/char/hw_random/pasemi-rng.c2
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c2
-rw-r--r--drivers/char/viotape.c23
-rw-r--r--drivers/hwmon/ad7418.c109
-rw-r--r--drivers/hwmon/adm1021.c105
-rw-r--r--drivers/hwmon/adm1025.c101
-rw-r--r--drivers/hwmon/adm1026.c109
-rw-r--r--drivers/hwmon/adm1029.c97
-rw-r--r--drivers/hwmon/adm1031.c96
-rw-r--r--drivers/hwmon/adm9240.c93
-rw-r--r--drivers/hwmon/ads7828.c89
-rw-r--r--drivers/hwmon/adt7470.c100
-rw-r--r--drivers/hwmon/adt7473.c102
-rw-r--r--drivers/hwmon/ams/ams-core.c2
-rw-r--r--drivers/hwmon/asb100.c207
-rw-r--r--drivers/hwmon/atxp1.c109
-rw-r--r--drivers/hwmon/ds1621.c99
-rw-r--r--drivers/hwmon/f75375s.c89
-rw-r--r--drivers/hwmon/fscher.c93
-rw-r--r--drivers/hwmon/fschmd.c112
-rw-r--r--drivers/hwmon/fscpos.c94
-rw-r--r--drivers/hwmon/gl518sm.c99
-rw-r--r--drivers/hwmon/gl520sm.c91
-rw-r--r--drivers/hwmon/lm63.c99
-rw-r--r--drivers/hwmon/lm77.c102
-rw-r--r--drivers/hwmon/lm80.c94
-rw-r--r--drivers/hwmon/lm83.c104
-rw-r--r--drivers/hwmon/lm87.c99
-rw-r--r--drivers/hwmon/lm90.c119
-rw-r--r--drivers/hwmon/lm92.c98
-rw-r--r--drivers/hwmon/lm93.c126
-rw-r--r--drivers/hwmon/max1619.c101
-rw-r--r--drivers/hwmon/max6650.c102
-rw-r--r--drivers/hwmon/smsc47m192.c102
-rw-r--r--drivers/hwmon/thmc50.c107
-rw-r--r--drivers/hwmon/w83791d.c205
-rw-r--r--drivers/hwmon/w83792d.c214
-rw-r--r--drivers/hwmon/w83793.c227
-rw-r--r--drivers/hwmon/w83l785ts.c117
-rw-r--r--drivers/hwmon/w83l786ng.c98
-rw-r--r--drivers/i2c/busses/i2c-amd756-s4882.c27
-rw-r--r--drivers/i2c/busses/i2c-mpc.c104
-rw-r--r--drivers/i2c/busses/i2c-nforce2-s4985.c31
-rw-r--r--drivers/i2c/chips/eeprom.c92
-rw-r--r--drivers/i2c/chips/max6875.c120
-rw-r--r--drivers/i2c/chips/pca9539.c109
-rw-r--r--drivers/i2c/chips/pcf8574.c108
-rw-r--r--drivers/i2c/chips/pcf8575.c96
-rw-r--r--drivers/i2c/chips/pcf8591.c94
-rw-r--r--drivers/i2c/i2c-core.c4
-rw-r--r--drivers/ide/Kconfig88
-rw-r--r--drivers/ide/Makefile10
-rw-r--r--drivers/ide/arm/Makefile1
-rw-r--r--drivers/ide/arm/bast-ide.c90
-rw-r--r--drivers/ide/arm/icside.c105
-rw-r--r--drivers/ide/arm/palm_bk3710.c3
-rw-r--r--drivers/ide/arm/rapide.c33
-rw-r--r--drivers/ide/h8300/ide-h8300.c19
-rw-r--r--drivers/ide/ide-cd.c160
-rw-r--r--drivers/ide/ide-disk.c1
-rw-r--r--drivers/ide/ide-floppy.c32
-rw-r--r--drivers/ide/ide-io.c72
-rw-r--r--drivers/ide/ide-iops.c24
-rw-r--r--drivers/ide/ide-lib.c151
-rw-r--r--drivers/ide/ide-pio-blacklist.c94
-rw-r--r--drivers/ide/ide-pnp.c4
-rw-r--r--drivers/ide/ide-probe.c21
-rw-r--r--drivers/ide/ide-tape.c1
-rw-r--r--drivers/ide/ide-taskfile.c33
-rw-r--r--drivers/ide/ide-timings.c (renamed from drivers/ide/ide-timing.h)204
-rw-r--r--drivers/ide/ide.c76
-rw-r--r--drivers/ide/legacy/ali14xx.c3
-rw-r--r--drivers/ide/legacy/buddha.c1
-rw-r--r--drivers/ide/legacy/falconide.c1
-rw-r--r--drivers/ide/legacy/gayle.c1
-rw-r--r--drivers/ide/legacy/ht6560b.c15
-rw-r--r--drivers/ide/legacy/ide-4drives.c25
-rw-r--r--drivers/ide/legacy/ide-cs.c11
-rw-r--r--drivers/ide/legacy/ide_platform.c9
-rw-r--r--drivers/ide/legacy/macide.c1
-rw-r--r--drivers/ide/legacy/q40ide.c1
-rw-r--r--drivers/ide/legacy/qd65xx.c19
-rw-r--r--drivers/ide/mips/au1xxx-ide.c13
-rw-r--r--drivers/ide/mips/swarm.c28
-rw-r--r--drivers/ide/pci/alim15x3.c6
-rw-r--r--drivers/ide/pci/amd74xx.c2
-rw-r--r--drivers/ide/pci/cmd640.c131
-rw-r--r--drivers/ide/pci/cmd64x.c6
-rw-r--r--drivers/ide/pci/cs5535.c6
-rw-r--r--drivers/ide/pci/cy82c693.c9
-rw-r--r--drivers/ide/pci/delkin_cb.c1
-rw-r--r--drivers/ide/pci/it821x.c6
-rw-r--r--drivers/ide/pci/scc_pata.c8
-rw-r--r--drivers/ide/pci/sgiioc4.c23
-rw-r--r--drivers/ide/pci/siimage.c3
-rw-r--r--drivers/ide/pci/sis5513.c3
-rw-r--r--drivers/ide/pci/sl82c105.c3
-rw-r--r--drivers/ide/pci/via82cxxx.c2
-rw-r--r--drivers/ide/ppc/Makefile1
-rw-r--r--drivers/ide/ppc/mpc8xx.c851
-rw-r--r--drivers/ide/ppc/pmac.c21
-rw-r--r--drivers/ide/setup-pci.c25
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c356
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h44
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c209
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c77
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c28
-rw-r--r--drivers/macintosh/adb.c5
-rw-r--r--drivers/macintosh/adbhid.c2
-rw-r--r--drivers/macintosh/macio_sysfs.c12
-rw-r--r--drivers/macintosh/mediabay.c27
-rw-r--r--drivers/macintosh/smu.c38
-rw-r--r--drivers/macintosh/therm_adt746x.c2
-rw-r--r--drivers/macintosh/therm_pm72.c4
-rw-r--r--drivers/macintosh/therm_windtunnel.c18
-rw-r--r--drivers/macintosh/via-pmu68k.c5
-rw-r--r--drivers/md/Kconfig23
-rw-r--r--drivers/md/Makefile7
-rw-r--r--drivers/md/dm-emc.c345
-rw-r--r--drivers/md/dm-hw-handler.c213
-rw-r--r--drivers/md/dm-hw-handler.h63
-rw-r--r--drivers/md/dm-mpath-hp-sw.c247
-rw-r--r--drivers/md/dm-mpath-rdac.c700
-rw-r--r--drivers/md/dm-mpath.c163
-rw-r--r--drivers/md/dm-mpath.h1
-rw-r--r--drivers/message/fusion/lsi/mpi.h2
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h2
-rw-r--r--drivers/message/fusion/mptbase.c91
-rw-r--r--drivers/message/fusion/mptbase.h17
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/message/fusion/mptctl.h2
-rw-r--r--drivers/message/fusion/mptdebug.h2
-rw-r--r--drivers/message/fusion/mptfc.c2
-rw-r--r--drivers/message/fusion/mptlan.c2
-rw-r--r--drivers/message/fusion/mptlan.h2
-rw-r--r--drivers/message/fusion/mptsas.c2
-rw-r--r--drivers/message/fusion/mptsas.h2
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/message/fusion/mptscsih.h2
-rw-r--r--drivers/message/fusion/mptspi.c3
-rw-r--r--drivers/misc/Kconfig31
-rw-r--r--drivers/misc/Makefile5
-rw-r--r--drivers/misc/acer-wmi.c145
-rw-r--r--drivers/misc/compal-laptop.c404
-rw-r--r--drivers/misc/eeepc-laptop.c4
-rw-r--r--drivers/misc/fujitsu-laptop.c825
-rw-r--r--drivers/mmc/card/block.c60
-rw-r--r--drivers/mmc/card/mmc_test.c569
-rw-r--r--drivers/mmc/card/sdio_uart.c9
-rw-r--r--drivers/mmc/core/core.c41
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/sd.c6
-rw-r--r--drivers/mmc/core/sdio_cis.c6
-rw-r--r--drivers/mmc/core/sdio_io.c167
-rw-r--r--drivers/mmc/host/Kconfig50
-rw-r--r--drivers/mmc/host/Makefile4
-rw-r--r--drivers/mmc/host/at91_mci.c257
-rw-r--r--drivers/mmc/host/atmel-mci-regs.h91
-rw-r--r--drivers/mmc/host/atmel-mci.c981
-rw-r--r--drivers/mmc/host/au1xmmc.c792
-rw-r--r--drivers/mmc/host/au1xmmc.h96
-rw-r--r--drivers/mmc/host/imxmmc.c9
-rw-r--r--drivers/mmc/host/mmc_spi.c33
-rw-r--r--drivers/mmc/host/mmci.c1
-rw-r--r--drivers/mmc/host/omap.c2
-rw-r--r--drivers/mmc/host/pxamci.c9
-rw-r--r--drivers/mmc/host/s3cmci.c1446
-rw-r--r--drivers/mmc/host/s3cmci.h70
-rw-r--r--drivers/mmc/host/sdhci-pci.c732
-rw-r--r--drivers/mmc/host/sdhci.c994
-rw-r--r--drivers/mmc/host/sdhci.h120
-rw-r--r--drivers/mmc/host/sdricoh_cs.c575
-rw-r--r--drivers/mmc/host/tifm_sd.c2
-rw-r--r--drivers/mmc/host/wbsd.c38
-rw-r--r--drivers/mtd/maps/Kconfig7
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/mtx-1_flash.c95
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/fec_8xx/Kconfig20
-rw-r--r--drivers/net/fec_8xx/Makefile12
-rw-r--r--drivers/net/fec_8xx/fec_8xx-netta.c151
-rw-r--r--drivers/net/fec_8xx/fec_8xx.h220
-rw-r--r--drivers/net/fec_8xx/fec_main.c1264
-rw-r--r--drivers/net/fec_8xx/fec_mii.c418
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/fs_enet/mac-scc.c2
-rw-r--r--drivers/net/fs_enet/mii-fec.c2
-rw-r--r--drivers/net/ibm_newemac/core.c61
-rw-r--r--drivers/net/ibm_newemac/core.h85
-rw-r--r--drivers/net/ibm_newemac/debug.c52
-rw-r--r--drivers/net/ibm_newemac/emac.h101
-rw-r--r--drivers/net/ibm_newemac/rgmii.c6
-rw-r--r--drivers/net/netconsole.c10
-rw-r--r--drivers/net/ucc_geth.c2
-rw-r--r--drivers/net/ucc_geth_mii.c2
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c20
-rw-r--r--drivers/of/device.c84
-rw-r--r--drivers/of/gpio.c38
-rw-r--r--drivers/of/of_i2c.c1
-rw-r--r--drivers/of/platform.c3
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c85
-rw-r--r--drivers/pci/hotplug/acpiphp.h1
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c25
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c23
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c6
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c4
-rw-r--r--drivers/pci/hotplug/fakephp.c86
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c3
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c284
-rw-r--r--drivers/pci/hotplug/pciehp.h16
-rw-r--r--drivers/pci/hotplug/pciehp_core.c127
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c318
-rw-r--r--drivers/pci/hotplug/rpadlpar_sysfs.c5
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c44
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c12
-rw-r--r--drivers/pci/hotplug/shpchp.h14
-rw-r--r--drivers/pci/hotplug/shpchp_core.c37
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c1
-rw-r--r--drivers/pci/intel-iommu.c1
-rw-r--r--drivers/pci/msi.c22
-rw-r--r--drivers/pci/pci-acpi.c277
-rw-r--r--drivers/pci/pci-driver.c388
-rw-r--r--drivers/pci/pci.c479
-rw-r--r--drivers/pci/pci.h48
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c9
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c8
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c24
-rw-r--r--drivers/pci/pcie/portdrv_bus.c1
-rw-r--r--drivers/pci/pcie/portdrv_core.c22
-rw-r--r--drivers/pci/pcie/portdrv_pci.c5
-rw-r--r--drivers/pci/probe.c38
-rw-r--r--drivers/pci/proc.c4
-rw-r--r--drivers/pci/quirks.c134
-rw-r--r--drivers/pci/setup-bus.c43
-rw-r--r--drivers/pci/setup-irq.c3
-rw-r--r--drivers/pci/setup-res.c70
-rw-r--r--drivers/pci/slot.c233
-rw-r--r--drivers/pcmcia/cistpl.c2
-rw-r--r--drivers/pcmcia/electra_cf.c2
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c4
-rw-r--r--drivers/pnp/base.h148
-rw-r--r--drivers/pnp/core.c29
-rw-r--r--drivers/pnp/interface.c207
-rw-r--r--drivers/pnp/isapnp/core.c253
-rw-r--r--drivers/pnp/manager.c414
-rw-r--r--drivers/pnp/pnpacpi/core.c4
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c492
-rw-r--r--drivers/pnp/pnpbios/rsparser.c274
-rw-r--r--drivers/pnp/quirks.c307
-rw-r--r--drivers/pnp/resource.c454
-rw-r--r--drivers/pnp/support.c171
-rw-r--r--drivers/pnp/system.c4
-rw-r--r--drivers/s390/block/dasd_diag.c25
-rw-r--r--drivers/s390/block/dasd_eckd.c4
-rw-r--r--drivers/s390/block/dasd_fba.c7
-rw-r--r--drivers/s390/char/vmur.c10
-rw-r--r--drivers/s390/char/zcore.c101
-rw-r--r--drivers/s390/cio/Makefile2
-rw-r--r--drivers/s390/cio/chsc.c9
-rw-r--r--drivers/s390/cio/chsc.h2
-rw-r--r--drivers/s390/cio/qdio.c3929
-rw-r--r--drivers/s390/cio/qdio.h835
-rw-r--r--drivers/s390/cio/qdio_debug.c240
-rw-r--r--drivers/s390/cio/qdio_debug.h91
-rw-r--r--drivers/s390/cio/qdio_main.c1755
-rw-r--r--drivers/s390/cio/qdio_perf.c151
-rw-r--r--drivers/s390/cio/qdio_perf.h54
-rw-r--r--drivers/s390/cio/qdio_setup.c521
-rw-r--r--drivers/s390/cio/qdio_thinint.c380
-rw-r--r--drivers/s390/net/qeth_core.h12
-rw-r--r--drivers/s390/net/qeth_core_main.c87
-rw-r--r--drivers/s390/net/qeth_l2_main.c26
-rw-r--r--drivers/s390/net/qeth_l3_main.c25
-rw-r--r--drivers/s390/scsi/Makefile3
-rw-r--r--drivers/s390/scsi/zfcp_aux.c1689
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c152
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c259
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c102
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h14
-rw-r--r--drivers/s390/scsi/zfcp_def.h341
-rw-r--r--drivers/s390/scsi/zfcp_erp.c3824
-rw-r--r--drivers/s390/scsi/zfcp_ext.h306
-rw-r--r--drivers/s390/scsi/zfcp_fc.c567
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c5573
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h70
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c799
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c784
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c496
-rw-r--r--drivers/s390/scsi/zfcp_sysfs_adapter.c270
-rw-r--r--drivers/s390/scsi/zfcp_sysfs_driver.c106
-rw-r--r--drivers/s390/scsi/zfcp_sysfs_port.c295
-rw-r--r--drivers/s390/scsi/zfcp_sysfs_unit.c167
-rw-r--r--drivers/scsi/Kconfig27
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/a100u2w.c49
-rw-r--r--drivers/scsi/aacraid/commctrl.c33
-rw-r--r--drivers/scsi/aacraid/linit.c2
-rw-r--r--drivers/scsi/device_handler/Kconfig32
-rw-r--r--drivers/scsi/device_handler/Makefile7
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c162
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c504
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c207
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c696
-rw-r--r--drivers/scsi/esp_scsi.c24
-rw-r--r--drivers/scsi/hosts.c2
-rw-r--r--drivers/scsi/ibmvscsi/Makefile1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c3910
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h682
-rw-r--r--drivers/scsi/ide-scsi.c14
-rw-r--r--drivers/scsi/iscsi_tcp.c514
-rw-r--r--drivers/scsi/iscsi_tcp.h7
-rw-r--r--drivers/scsi/libiscsi.c1359
-rw-r--r--drivers/scsi/lpfc/lpfc.h21
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c181
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c120
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c145
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c232
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c49
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c16
-rw-r--r--drivers/scsi/mesh.c8
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c8
-rw-r--r--drivers/scsi/scsi.c9
-rw-r--r--drivers/scsi/scsi_debug.c110
-rw-r--r--drivers/scsi/scsi_error.c11
-rw-r--r--drivers/scsi/scsi_lib.c35
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c1
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c395
-rw-r--r--drivers/scsi/sd.c7
-rw-r--r--drivers/scsi/sd.h62
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_misc.h4
-rw-r--r--drivers/serial/8250_pnp.c24
-rw-r--r--drivers/serial/cpm_uart/cpm_uart.h11
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_core.c393
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_cpm1.c170
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_cpm1.h12
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_cpm2.c283
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_cpm2.h12
-rw-r--r--drivers/serial/of_serial.c2
-rw-r--r--drivers/spi/mpc52xx_psc_spi.c2
-rw-r--r--drivers/video/platinumfb.c4
-rw-r--r--drivers/w1/masters/ds2482.c104
-rw-r--r--drivers/watchdog/mpc5200_wdt.c2
438 files changed, 34408 insertions, 31397 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index bba867391a85..735f5ea17473 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -336,6 +336,15 @@ config ACPI_EC
336 the battery and thermal drivers. If you are compiling for a 336 the battery and thermal drivers. If you are compiling for a
337 mobile system, say Y. 337 mobile system, say Y.
338 338
339config ACPI_PCI_SLOT
340 tristate "PCI slot detection driver"
341 default n
342 help
343 This driver will attempt to discover all PCI slots in your system,
344 and creates entries in /sys/bus/pci/slots/. This feature can
345 help you correlate PCI bus addresses with the physical geography
346 of your slots. If you are unsure, say N.
347
339config ACPI_POWER 348config ACPI_POWER
340 bool 349 bool
341 default y 350 default y
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 40b0fcae4c78..52a4cd4b81d0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -21,7 +21,7 @@ obj-$(CONFIG_X86) += blacklist.o
21# 21#
22# ACPI Core Subsystem (Interpreter) 22# ACPI Core Subsystem (Interpreter)
23# 23#
24obj-y += osl.o utils.o \ 24obj-y += osl.o utils.o reboot.o\
25 dispatcher/ events/ executer/ hardware/ \ 25 dispatcher/ events/ executer/ hardware/ \
26 namespace/ parser/ resources/ tables/ \ 26 namespace/ parser/ resources/ tables/ \
27 utilities/ 27 utilities/
@@ -48,6 +48,7 @@ obj-$(CONFIG_ACPI_DOCK) += dock.o
48obj-$(CONFIG_ACPI_BAY) += bay.o 48obj-$(CONFIG_ACPI_BAY) += bay.o
49obj-$(CONFIG_ACPI_VIDEO) += video.o 49obj-$(CONFIG_ACPI_VIDEO) += video.o
50obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o 50obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
51obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
51obj-$(CONFIG_ACPI_POWER) += power.o 52obj-$(CONFIG_ACPI_POWER) += power.o
52obj-$(CONFIG_ACPI_PROCESSOR) += processor.o 53obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
53obj-$(CONFIG_ACPI_CONTAINER) += container.o 54obj-$(CONFIG_ACPI_CONTAINER) += container.o
diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c
index 61b6c5beb2d3..e6caf5d42e0e 100644
--- a/drivers/acpi/bay.c
+++ b/drivers/acpi/bay.c
@@ -380,6 +380,9 @@ static int __init bay_init(void)
380 if (acpi_disabled) 380 if (acpi_disabled)
381 return -ENODEV; 381 return -ENODEV;
382 382
383 if (acpi_disabled)
384 return -ENODEV;
385
383 /* look for dockable drive bays */ 386 /* look for dockable drive bays */
384 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 387 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
385 ACPI_UINT32_MAX, find_bay, &bays, NULL); 388 ACPI_UINT32_MAX, find_bay, &bays, NULL);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index a6dbcf4d9ef5..ccae305ee55d 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -295,6 +295,28 @@ int acpi_bus_set_power(acpi_handle handle, int state)
295 295
296EXPORT_SYMBOL(acpi_bus_set_power); 296EXPORT_SYMBOL(acpi_bus_set_power);
297 297
298bool acpi_bus_power_manageable(acpi_handle handle)
299{
300 struct acpi_device *device;
301 int result;
302
303 result = acpi_bus_get_device(handle, &device);
304 return result ? false : device->flags.power_manageable;
305}
306
307EXPORT_SYMBOL(acpi_bus_power_manageable);
308
309bool acpi_bus_can_wakeup(acpi_handle handle)
310{
311 struct acpi_device *device;
312 int result;
313
314 result = acpi_bus_get_device(handle, &device);
315 return result ? false : device->wakeup.flags.valid;
316}
317
318EXPORT_SYMBOL(acpi_bus_can_wakeup);
319
298/* -------------------------------------------------------------------------- 320/* --------------------------------------------------------------------------
299 Event Management 321 Event Management
300 -------------------------------------------------------------------------- */ 322 -------------------------------------------------------------------------- */
@@ -612,7 +634,7 @@ static int __init acpi_bus_init_irq(void)
612 return 0; 634 return 0;
613} 635}
614 636
615acpi_native_uint acpi_gbl_permanent_mmap; 637u8 acpi_gbl_permanent_mmap;
616 638
617 639
618void __init acpi_early_init(void) 640void __init acpi_early_init(void)
diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/dispatcher/dsinit.c
index 610b1ee102b0..949f7c75029e 100644
--- a/drivers/acpi/dispatcher/dsinit.c
+++ b/drivers/acpi/dispatcher/dsinit.c
@@ -151,7 +151,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
151 ******************************************************************************/ 151 ******************************************************************************/
152 152
153acpi_status 153acpi_status
154acpi_ds_initialize_objects(acpi_native_uint table_index, 154acpi_ds_initialize_objects(u32 table_index,
155 struct acpi_namespace_node * start_node) 155 struct acpi_namespace_node * start_node)
156{ 156{
157 acpi_status status; 157 acpi_status status;
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index 2509809a36cf..4613b9ca5792 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -377,7 +377,6 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
377 } 377 }
378 378
379 info->parameters = &this_walk_state->operands[0]; 379 info->parameters = &this_walk_state->operands[0];
380 info->parameter_type = ACPI_PARAM_ARGS;
381 380
382 status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node, 381 status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
383 obj_desc->method.aml_start, 382 obj_desc->method.aml_start,
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index a818e0ddb996..6a81c4400edf 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -691,12 +691,6 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
691 691
692 status = acpi_ex_resolve_operands(op->common.aml_opcode, 692 status = acpi_ex_resolve_operands(op->common.aml_opcode,
693 ACPI_WALK_OPERANDS, walk_state); 693 ACPI_WALK_OPERANDS, walk_state);
694
695 ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE,
696 acpi_ps_get_opcode_name(op->common.aml_opcode),
697 walk_state->num_operands,
698 "after AcpiExResolveOperands");
699
700 if (ACPI_FAILURE(status)) { 694 if (ACPI_FAILURE(status)) {
701 ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)", 695 ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
702 acpi_ps_get_opcode_name(op->common.aml_opcode), 696 acpi_ps_get_opcode_name(op->common.aml_opcode),
@@ -785,10 +779,6 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
785 return_ACPI_STATUS(status); 779 return_ACPI_STATUS(status);
786 } 780 }
787 781
788 ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE,
789 acpi_ps_get_opcode_name(op->common.aml_opcode),
790 1, "after AcpiExResolveOperands");
791
792 obj_desc = acpi_ns_get_attached_object(node); 782 obj_desc = acpi_ns_get_attached_object(node);
793 if (!obj_desc) { 783 if (!obj_desc) {
794 return_ACPI_STATUS(AE_NOT_EXIST); 784 return_ACPI_STATUS(AE_NOT_EXIST);
@@ -848,7 +838,7 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
848 union acpi_operand_object **operand; 838 union acpi_operand_object **operand;
849 struct acpi_namespace_node *node; 839 struct acpi_namespace_node *node;
850 union acpi_parse_object *next_op; 840 union acpi_parse_object *next_op;
851 acpi_native_uint table_index; 841 u32 table_index;
852 struct acpi_table_header *table; 842 struct acpi_table_header *table;
853 843
854 ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op); 844 ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op);
@@ -882,10 +872,6 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
882 return_ACPI_STATUS(status); 872 return_ACPI_STATUS(status);
883 } 873 }
884 874
885 ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE,
886 acpi_ps_get_opcode_name(op->common.aml_opcode),
887 1, "after AcpiExResolveOperands");
888
889 operand = &walk_state->operands[0]; 875 operand = &walk_state->operands[0];
890 876
891 /* Find the ACPI table */ 877 /* Find the ACPI table */
@@ -1091,10 +1077,8 @@ acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state,
1091 return_ACPI_STATUS(status); 1077 return_ACPI_STATUS(status);
1092 } 1078 }
1093 1079
1094 ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE, 1080 ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS,
1095 acpi_ps_get_opcode_name(op->common.aml_opcode), 1081 acpi_ps_get_opcode_name(op->common.aml_opcode), 1);
1096 1, "after AcpiExResolveOperands");
1097
1098 /* 1082 /*
1099 * Get the bank_value operand and save it 1083 * Get the bank_value operand and save it
1100 * (at Top of stack) 1084 * (at Top of stack)
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c
index b246b9657ead..b5072fa9c920 100644
--- a/drivers/acpi/dispatcher/dswexec.c
+++ b/drivers/acpi/dispatcher/dswexec.c
@@ -408,14 +408,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
408 [walk_state-> 408 [walk_state->
409 num_operands - 1]), 409 num_operands - 1]),
410 walk_state); 410 walk_state);
411 if (ACPI_SUCCESS(status)) {
412 ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS,
413 ACPI_IMODE_EXECUTE,
414 acpi_ps_get_opcode_name
415 (walk_state->opcode),
416 walk_state->num_operands,
417 "after ExResolveOperands");
418 }
419 } 411 }
420 412
421 if (ACPI_SUCCESS(status)) { 413 if (ACPI_SUCCESS(status)) {
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c
index 1386ced332ec..b00d4af791aa 100644
--- a/drivers/acpi/dispatcher/dswstate.c
+++ b/drivers/acpi/dispatcher/dswstate.c
@@ -70,7 +70,7 @@ acpi_status
70acpi_ds_result_pop(union acpi_operand_object **object, 70acpi_ds_result_pop(union acpi_operand_object **object,
71 struct acpi_walk_state *walk_state) 71 struct acpi_walk_state *walk_state)
72{ 72{
73 acpi_native_uint index; 73 u32 index;
74 union acpi_generic_state *state; 74 union acpi_generic_state *state;
75 acpi_status status; 75 acpi_status status;
76 76
@@ -122,7 +122,7 @@ acpi_ds_result_pop(union acpi_operand_object **object,
122 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 122 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
123 "Obj=%p [%s] Index=%X State=%p Num=%X\n", *object, 123 "Obj=%p [%s] Index=%X State=%p Num=%X\n", *object,
124 acpi_ut_get_object_type_name(*object), 124 acpi_ut_get_object_type_name(*object),
125 (u32) index, walk_state, walk_state->result_count)); 125 index, walk_state, walk_state->result_count));
126 126
127 return (AE_OK); 127 return (AE_OK);
128} 128}
@@ -146,7 +146,7 @@ acpi_ds_result_push(union acpi_operand_object * object,
146{ 146{
147 union acpi_generic_state *state; 147 union acpi_generic_state *state;
148 acpi_status status; 148 acpi_status status;
149 acpi_native_uint index; 149 u32 index;
150 150
151 ACPI_FUNCTION_NAME(ds_result_push); 151 ACPI_FUNCTION_NAME(ds_result_push);
152 152
@@ -400,7 +400,7 @@ void
400acpi_ds_obj_stack_pop_and_delete(u32 pop_count, 400acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
401 struct acpi_walk_state *walk_state) 401 struct acpi_walk_state *walk_state)
402{ 402{
403 acpi_native_int i; 403 s32 i;
404 union acpi_operand_object *obj_desc; 404 union acpi_operand_object *obj_desc;
405 405
406 ACPI_FUNCTION_NAME(ds_obj_stack_pop_and_delete); 406 ACPI_FUNCTION_NAME(ds_obj_stack_pop_and_delete);
@@ -409,7 +409,7 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
409 return; 409 return;
410 } 410 }
411 411
412 for (i = (acpi_native_int) (pop_count - 1); i >= 0; i--) { 412 for (i = (s32) pop_count - 1; i >= 0; i--) {
413 if (walk_state->num_operands == 0) { 413 if (walk_state->num_operands == 0) {
414 return; 414 return;
415 } 415 }
@@ -615,14 +615,8 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
615 walk_state->pass_number = pass_number; 615 walk_state->pass_number = pass_number;
616 616
617 if (info) { 617 if (info) {
618 if (info->parameter_type == ACPI_PARAM_GPE) { 618 walk_state->params = info->parameters;
619 walk_state->gpe_event_info = 619 walk_state->caller_return_desc = &info->return_object;
620 ACPI_CAST_PTR(struct acpi_gpe_event_info,
621 info->parameters);
622 } else {
623 walk_state->params = info->parameters;
624 walk_state->caller_return_desc = &info->return_object;
625 }
626 } 620 }
627 621
628 status = acpi_ps_init_scope(&walk_state->parser_state, op); 622 status = acpi_ps_init_scope(&walk_state->parser_state, op);
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index bb7c51f712bd..1e872e79db33 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -920,6 +920,9 @@ static int __init dock_init(void)
920 if (acpi_disabled) 920 if (acpi_disabled)
921 return 0; 921 return 0;
922 922
923 if (acpi_disabled)
924 return 0;
925
923 /* look for a dock station */ 926 /* look for a dock station */
924 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 927 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
925 ACPI_UINT32_MAX, find_dock, &num, NULL); 928 ACPI_UINT32_MAX, find_dock, &num, NULL);
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
index 5d30e5be1b1c..c56c5c6ea77b 100644
--- a/drivers/acpi/events/evevent.c
+++ b/drivers/acpi/events/evevent.c
@@ -188,7 +188,7 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
188 188
189static acpi_status acpi_ev_fixed_event_initialize(void) 189static acpi_status acpi_ev_fixed_event_initialize(void)
190{ 190{
191 acpi_native_uint i; 191 u32 i;
192 acpi_status status; 192 acpi_status status;
193 193
194 /* 194 /*
@@ -231,7 +231,7 @@ u32 acpi_ev_fixed_event_detect(void)
231 u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; 231 u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
232 u32 fixed_status; 232 u32 fixed_status;
233 u32 fixed_enable; 233 u32 fixed_enable;
234 acpi_native_uint i; 234 u32 i;
235 235
236 ACPI_FUNCTION_NAME(ev_fixed_event_detect); 236 ACPI_FUNCTION_NAME(ev_fixed_event_detect);
237 237
@@ -260,7 +260,7 @@ u32 acpi_ev_fixed_event_detect(void)
260 260
261 /* Found an active (signalled) event */ 261 /* Found an active (signalled) event */
262 acpi_os_fixed_event_count(i); 262 acpi_os_fixed_event_count(i);
263 int_status |= acpi_ev_fixed_event_dispatch((u32) i); 263 int_status |= acpi_ev_fixed_event_dispatch(i);
264 } 264 }
265 } 265 }
266 266
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index 5354be44f876..c5e53aae86f7 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -256,7 +256,7 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
256 return_ACPI_STATUS(status); 256 return_ACPI_STATUS(status);
257 } 257 }
258 258
259 /* Mark wake-disabled or HW disable, or both */ 259 /* Clear the appropriate enabled flags for this GPE */
260 260
261 switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { 261 switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
262 case ACPI_GPE_TYPE_WAKE: 262 case ACPI_GPE_TYPE_WAKE:
@@ -273,13 +273,23 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
273 /* Disable the requested runtime GPE */ 273 /* Disable the requested runtime GPE */
274 274
275 ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED); 275 ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
276 276 break;
277 /* fallthrough */
278 277
279 default: 278 default:
280 acpi_hw_write_gpe_enable_reg(gpe_event_info); 279 break;
281 } 280 }
282 281
282 /*
283 * Even if we don't know the GPE type, make sure that we always
284 * disable it. low_disable_gpe will just clear the enable bit for this
285 * GPE and write it. It will not write out the current GPE enable mask,
286 * since this may inadvertently enable GPEs too early, if a rogue GPE has
287 * come in during ACPICA initialization - possibly as a result of AML or
288 * other code that has enabled the GPE.
289 */
290 status = acpi_hw_low_disable_gpe(gpe_event_info);
291 return_ACPI_STATUS(status);
292
283 return_ACPI_STATUS(AE_OK); 293 return_ACPI_STATUS(AE_OK);
284} 294}
285 295
@@ -305,7 +315,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
305{ 315{
306 union acpi_operand_object *obj_desc; 316 union acpi_operand_object *obj_desc;
307 struct acpi_gpe_block_info *gpe_block; 317 struct acpi_gpe_block_info *gpe_block;
308 acpi_native_uint i; 318 u32 i;
309 319
310 ACPI_FUNCTION_ENTRY(); 320 ACPI_FUNCTION_ENTRY();
311 321
@@ -379,8 +389,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
379 u32 status_reg; 389 u32 status_reg;
380 u32 enable_reg; 390 u32 enable_reg;
381 acpi_cpu_flags flags; 391 acpi_cpu_flags flags;
382 acpi_native_uint i; 392 u32 i;
383 acpi_native_uint j; 393 u32 j;
384 394
385 ACPI_FUNCTION_NAME(ev_gpe_detect); 395 ACPI_FUNCTION_NAME(ev_gpe_detect);
386 396
@@ -462,13 +472,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
462 */ 472 */
463 int_status |= 473 int_status |=
464 acpi_ev_gpe_dispatch(&gpe_block-> 474 acpi_ev_gpe_dispatch(&gpe_block->
465 event_info[(i * 475 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
466 ACPI_GPE_REGISTER_WIDTH)
467 +
468 j],
469 (u32) j +
470 gpe_register_info->
471 base_gpe_number);
472 } 476 }
473 } 477 }
474 } 478 }
@@ -555,10 +559,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
555 */ 559 */
556 info->prefix_node = 560 info->prefix_node =
557 local_gpe_event_info.dispatch.method_node; 561 local_gpe_event_info.dispatch.method_node;
558 info->parameters =
559 ACPI_CAST_PTR(union acpi_operand_object *,
560 gpe_event_info);
561 info->parameter_type = ACPI_PARAM_GPE;
562 info->flags = ACPI_IGNORE_RETURN_VALUE; 562 info->flags = ACPI_IGNORE_RETURN_VALUE;
563 563
564 status = acpi_ns_evaluate(info); 564 status = acpi_ns_evaluate(info);
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
index e6c4d4c49e79..73c058e2f5c2 100644
--- a/drivers/acpi/events/evgpeblk.c
+++ b/drivers/acpi/events/evgpeblk.c
@@ -189,8 +189,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
189 struct acpi_gpe_block_info *gpe_block) 189 struct acpi_gpe_block_info *gpe_block)
190{ 190{
191 struct acpi_gpe_event_info *gpe_event_info; 191 struct acpi_gpe_event_info *gpe_event_info;
192 acpi_native_uint i; 192 u32 i;
193 acpi_native_uint j; 193 u32 j;
194 194
195 ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers); 195 ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
196 196
@@ -203,7 +203,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
203 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { 203 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
204 gpe_event_info = 204 gpe_event_info =
205 &gpe_block-> 205 &gpe_block->
206 event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j]; 206 event_info[((acpi_size) i *
207 ACPI_GPE_REGISTER_WIDTH) + j];
207 208
208 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 209 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
209 ACPI_GPE_DISPATCH_HANDLER) { 210 ACPI_GPE_DISPATCH_HANDLER) {
@@ -744,8 +745,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
744 struct acpi_gpe_event_info *gpe_event_info = NULL; 745 struct acpi_gpe_event_info *gpe_event_info = NULL;
745 struct acpi_gpe_event_info *this_event; 746 struct acpi_gpe_event_info *this_event;
746 struct acpi_gpe_register_info *this_register; 747 struct acpi_gpe_register_info *this_register;
747 acpi_native_uint i; 748 u32 i;
748 acpi_native_uint j; 749 u32 j;
749 acpi_status status; 750 acpi_status status;
750 751
751 ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks); 752 ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);
@@ -983,8 +984,8 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
983 struct acpi_gpe_walk_info gpe_info; 984 struct acpi_gpe_walk_info gpe_info;
984 u32 wake_gpe_count; 985 u32 wake_gpe_count;
985 u32 gpe_enabled_count; 986 u32 gpe_enabled_count;
986 acpi_native_uint i; 987 u32 i;
987 acpi_native_uint j; 988 u32 j;
988 989
989 ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); 990 ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
990 991
@@ -1033,7 +1034,8 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
1033 1034
1034 gpe_event_info = 1035 gpe_event_info =
1035 &gpe_block-> 1036 &gpe_block->
1036 event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j]; 1037 event_info[((acpi_size) i *
1038 ACPI_GPE_REGISTER_WIDTH) + j];
1037 1039
1038 if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 1040 if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
1039 ACPI_GPE_DISPATCH_METHOD) 1041 ACPI_GPE_DISPATCH_METHOD)
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
index 2113e58e2221..1d5670be729a 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/events/evmisc.c
@@ -575,7 +575,7 @@ acpi_status acpi_ev_release_global_lock(void)
575 575
576void acpi_ev_terminate(void) 576void acpi_ev_terminate(void)
577{ 577{
578 acpi_native_uint i; 578 u32 i;
579 acpi_status status; 579 acpi_status status;
580 580
581 ACPI_FUNCTION_TRACE(ev_terminate); 581 ACPI_FUNCTION_TRACE(ev_terminate);
@@ -589,7 +589,7 @@ void acpi_ev_terminate(void)
589 /* Disable all fixed events */ 589 /* Disable all fixed events */
590 590
591 for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { 591 for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
592 status = acpi_disable_event((u32) i, 0); 592 status = acpi_disable_event(i, 0);
593 if (ACPI_FAILURE(status)) { 593 if (ACPI_FAILURE(status)) {
594 ACPI_ERROR((AE_INFO, 594 ACPI_ERROR((AE_INFO,
595 "Could not disable fixed event %d", 595 "Could not disable fixed event %d",
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
index 1628f5934752..236fbd1ca438 100644
--- a/drivers/acpi/events/evregion.c
+++ b/drivers/acpi/events/evregion.c
@@ -81,7 +81,7 @@ acpi_ev_install_handler(acpi_handle obj_handle,
81acpi_status acpi_ev_install_region_handlers(void) 81acpi_status acpi_ev_install_region_handlers(void)
82{ 82{
83 acpi_status status; 83 acpi_status status;
84 acpi_native_uint i; 84 u32 i;
85 85
86 ACPI_FUNCTION_TRACE(ev_install_region_handlers); 86 ACPI_FUNCTION_TRACE(ev_install_region_handlers);
87 87
@@ -151,7 +151,7 @@ acpi_status acpi_ev_install_region_handlers(void)
151acpi_status acpi_ev_initialize_op_regions(void) 151acpi_status acpi_ev_initialize_op_regions(void)
152{ 152{
153 acpi_status status; 153 acpi_status status;
154 acpi_native_uint i; 154 u32 i;
155 155
156 ACPI_FUNCTION_TRACE(ev_initialize_op_regions); 156 ACPI_FUNCTION_TRACE(ev_initialize_op_regions);
157 157
@@ -219,7 +219,6 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
219 info->prefix_node = region_obj2->extra.method_REG; 219 info->prefix_node = region_obj2->extra.method_REG;
220 info->pathname = NULL; 220 info->pathname = NULL;
221 info->parameters = args; 221 info->parameters = args;
222 info->parameter_type = ACPI_PARAM_ARGS;
223 info->flags = ACPI_IGNORE_RETURN_VALUE; 222 info->flags = ACPI_IGNORE_RETURN_VALUE;
224 223
225 /* 224 /*
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
index 2e3d2c5e4f4d..6b94b38df07d 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/events/evrgnini.c
@@ -380,7 +380,7 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
380 acpi_status status; 380 acpi_status status;
381 struct acpica_device_id hid; 381 struct acpica_device_id hid;
382 struct acpi_compatible_id_list *cid; 382 struct acpi_compatible_id_list *cid;
383 acpi_native_uint i; 383 u32 i;
384 384
385 /* 385 /*
386 * Get the _HID and check for a PCI Root Bridge 386 * Get the _HID and check for a PCI Root Bridge
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
index 99a7502e6a87..73bfd6bf962f 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/events/evxfevnt.c
@@ -472,7 +472,6 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
472} 472}
473 473
474ACPI_EXPORT_SYMBOL(acpi_clear_gpe) 474ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
475#ifdef ACPI_FUTURE_USAGE
476/******************************************************************************* 475/*******************************************************************************
477 * 476 *
478 * FUNCTION: acpi_get_event_status 477 * FUNCTION: acpi_get_event_status
@@ -489,6 +488,7 @@ ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
489acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) 488acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
490{ 489{
491 acpi_status status = AE_OK; 490 acpi_status status = AE_OK;
491 u32 value;
492 492
493 ACPI_FUNCTION_TRACE(acpi_get_event_status); 493 ACPI_FUNCTION_TRACE(acpi_get_event_status);
494 494
@@ -506,7 +506,20 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
506 506
507 status = 507 status =
508 acpi_get_register(acpi_gbl_fixed_event_info[event]. 508 acpi_get_register(acpi_gbl_fixed_event_info[event].
509 status_register_id, event_status); 509 enable_register_id, &value);
510 if (ACPI_FAILURE(status))
511 return_ACPI_STATUS(status);
512
513 *event_status = value;
514
515 status =
516 acpi_get_register(acpi_gbl_fixed_event_info[event].
517 status_register_id, &value);
518 if (ACPI_FAILURE(status))
519 return_ACPI_STATUS(status);
520
521 if (value)
522 *event_status |= ACPI_EVENT_FLAG_SET;
510 523
511 return_ACPI_STATUS(status); 524 return_ACPI_STATUS(status);
512} 525}
@@ -566,7 +579,6 @@ acpi_get_gpe_status(acpi_handle gpe_device,
566} 579}
567 580
568ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) 581ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
569#endif /* ACPI_FUTURE_USAGE */
570/******************************************************************************* 582/*******************************************************************************
571 * 583 *
572 * FUNCTION: acpi_install_gpe_block 584 * FUNCTION: acpi_install_gpe_block
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index 39d742190584..2a32c843cb4a 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("exconfig")
53 53
54/* Local prototypes */ 54/* Local prototypes */
55static acpi_status 55static acpi_status
56acpi_ex_add_table(acpi_native_uint table_index, 56acpi_ex_add_table(u32 table_index,
57 struct acpi_namespace_node *parent_node, 57 struct acpi_namespace_node *parent_node,
58 union acpi_operand_object **ddb_handle); 58 union acpi_operand_object **ddb_handle);
59 59
@@ -73,7 +73,7 @@ acpi_ex_add_table(acpi_native_uint table_index,
73 ******************************************************************************/ 73 ******************************************************************************/
74 74
75static acpi_status 75static acpi_status
76acpi_ex_add_table(acpi_native_uint table_index, 76acpi_ex_add_table(u32 table_index,
77 struct acpi_namespace_node *parent_node, 77 struct acpi_namespace_node *parent_node,
78 union acpi_operand_object **ddb_handle) 78 union acpi_operand_object **ddb_handle)
79{ 79{
@@ -96,7 +96,8 @@ acpi_ex_add_table(acpi_native_uint table_index,
96 96
97 /* Install the new table into the local data structures */ 97 /* Install the new table into the local data structures */
98 98
99 obj_desc->reference.object = ACPI_CAST_PTR(void, table_index); 99 obj_desc->reference.object = ACPI_CAST_PTR(void,
100 (unsigned long)table_index);
100 101
101 /* Add the table to the namespace */ 102 /* Add the table to the namespace */
102 103
@@ -128,12 +129,12 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
128{ 129{
129 acpi_status status; 130 acpi_status status;
130 union acpi_operand_object **operand = &walk_state->operands[0]; 131 union acpi_operand_object **operand = &walk_state->operands[0];
131 acpi_native_uint table_index;
132 struct acpi_namespace_node *parent_node; 132 struct acpi_namespace_node *parent_node;
133 struct acpi_namespace_node *start_node; 133 struct acpi_namespace_node *start_node;
134 struct acpi_namespace_node *parameter_node = NULL; 134 struct acpi_namespace_node *parameter_node = NULL;
135 union acpi_operand_object *ddb_handle; 135 union acpi_operand_object *ddb_handle;
136 struct acpi_table_header *table; 136 struct acpi_table_header *table;
137 u32 table_index;
137 138
138 ACPI_FUNCTION_TRACE(ex_load_table_op); 139 ACPI_FUNCTION_TRACE(ex_load_table_op);
139 140
@@ -280,7 +281,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
280{ 281{
281 union acpi_operand_object *ddb_handle; 282 union acpi_operand_object *ddb_handle;
282 struct acpi_table_desc table_desc; 283 struct acpi_table_desc table_desc;
283 acpi_native_uint table_index; 284 u32 table_index;
284 acpi_status status; 285 acpi_status status;
285 u32 length; 286 u32 length;
286 287
@@ -437,7 +438,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
437{ 438{
438 acpi_status status = AE_OK; 439 acpi_status status = AE_OK;
439 union acpi_operand_object *table_desc = ddb_handle; 440 union acpi_operand_object *table_desc = ddb_handle;
440 acpi_native_uint table_index; 441 u32 table_index;
441 struct acpi_table_header *table; 442 struct acpi_table_header *table;
442 443
443 ACPI_FUNCTION_TRACE(ex_unload_table); 444 ACPI_FUNCTION_TRACE(ex_unload_table);
@@ -454,9 +455,9 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
454 return_ACPI_STATUS(AE_BAD_PARAMETER); 455 return_ACPI_STATUS(AE_BAD_PARAMETER);
455 } 456 }
456 457
457 /* Get the table index from the ddb_handle */ 458 /* Get the table index from the ddb_handle (acpi_size for 64-bit case) */
458 459
459 table_index = (acpi_native_uint) table_desc->reference.object; 460 table_index = (u32) (acpi_size) table_desc->reference.object;
460 461
461 /* Invoke table handler if present */ 462 /* Invoke table handler if present */
462 463
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c
index fd954b4ed83d..261d97516d9b 100644
--- a/drivers/acpi/executer/exconvrt.c
+++ b/drivers/acpi/executer/exconvrt.c
@@ -288,11 +288,11 @@ acpi_ex_convert_to_ascii(acpi_integer integer,
288 u16 base, u8 * string, u8 data_width) 288 u16 base, u8 * string, u8 data_width)
289{ 289{
290 acpi_integer digit; 290 acpi_integer digit;
291 acpi_native_uint i; 291 u32 i;
292 acpi_native_uint j; 292 u32 j;
293 acpi_native_uint k = 0; 293 u32 k = 0;
294 acpi_native_uint hex_length; 294 u32 hex_length;
295 acpi_native_uint decimal_length; 295 u32 decimal_length;
296 u32 remainder; 296 u32 remainder;
297 u8 supress_zeros; 297 u8 supress_zeros;
298 298
@@ -348,7 +348,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer,
348 348
349 /* hex_length: 2 ascii hex chars per data byte */ 349 /* hex_length: 2 ascii hex chars per data byte */
350 350
351 hex_length = (acpi_native_uint) ACPI_MUL_2(data_width); 351 hex_length = ACPI_MUL_2(data_width);
352 for (i = 0, j = (hex_length - 1); i < hex_length; i++, j--) { 352 for (i = 0, j = (hex_length - 1); i < hex_length; i++, j--) {
353 353
354 /* Get one hex digit, most significant digits first */ 354 /* Get one hex digit, most significant digits first */
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c
index 60e62c4f0577..ad09696d5069 100644
--- a/drivers/acpi/executer/excreate.c
+++ b/drivers/acpi/executer/excreate.c
@@ -45,8 +45,6 @@
45#include <acpi/acinterp.h> 45#include <acpi/acinterp.h>
46#include <acpi/amlcode.h> 46#include <acpi/amlcode.h>
47#include <acpi/acnamesp.h> 47#include <acpi/acnamesp.h>
48#include <acpi/acevents.h>
49#include <acpi/actables.h>
50 48
51#define _COMPONENT ACPI_EXECUTER 49#define _COMPONENT ACPI_EXECUTER
52ACPI_MODULE_NAME("excreate") 50ACPI_MODULE_NAME("excreate")
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
index 74f1b22601b3..2be2e2bf95bf 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/executer/exdump.c
@@ -580,25 +580,22 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
580 580
581 case ACPI_TYPE_BUFFER: 581 case ACPI_TYPE_BUFFER:
582 582
583 acpi_os_printf("Buffer len %X @ %p\n", 583 acpi_os_printf("Buffer length %.2X @ %p\n",
584 obj_desc->buffer.length, 584 obj_desc->buffer.length,
585 obj_desc->buffer.pointer); 585 obj_desc->buffer.pointer);
586 586
587 length = obj_desc->buffer.length;
588 if (length > 64) {
589 length = 64;
590 }
591
592 /* Debug only -- dump the buffer contents */ 587 /* Debug only -- dump the buffer contents */
593 588
594 if (obj_desc->buffer.pointer) { 589 if (obj_desc->buffer.pointer) {
595 acpi_os_printf("Buffer Contents: "); 590 length = obj_desc->buffer.length;
596 591 if (length > 128) {
597 for (index = 0; index < length; index++) { 592 length = 128;
598 acpi_os_printf(" %02x",
599 obj_desc->buffer.pointer[index]);
600 } 593 }
601 acpi_os_printf("\n"); 594
595 acpi_os_printf
596 ("Buffer Contents: (displaying length 0x%.2X)\n",
597 length);
598 ACPI_DUMP_BUFFER(obj_desc->buffer.pointer, length);
602 } 599 }
603 break; 600 break;
604 601
@@ -756,54 +753,42 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
756 * 753 *
757 * FUNCTION: acpi_ex_dump_operands 754 * FUNCTION: acpi_ex_dump_operands
758 * 755 *
759 * PARAMETERS: Operands - Operand list 756 * PARAMETERS: Operands - A list of Operand objects
760 * interpreter_mode - Load or Exec 757 * opcode_name - AML opcode name
761 * Ident - Identification 758 * num_operands - Operand count for this opcode
762 * num_levels - # of stack entries to dump above line
763 * Note - Output notation
764 * module_name - Caller's module name
765 * line_number - Caller's invocation line number
766 * 759 *
767 * DESCRIPTION: Dump the object stack 760 * DESCRIPTION: Dump the operands associated with the opcode
768 * 761 *
769 ******************************************************************************/ 762 ******************************************************************************/
770 763
771void 764void
772acpi_ex_dump_operands(union acpi_operand_object **operands, 765acpi_ex_dump_operands(union acpi_operand_object **operands,
773 acpi_interpreter_mode interpreter_mode, 766 const char *opcode_name, u32 num_operands)
774 char *ident,
775 u32 num_levels,
776 char *note, char *module_name, u32 line_number)
777{ 767{
778 acpi_native_uint i;
779
780 ACPI_FUNCTION_NAME(ex_dump_operands); 768 ACPI_FUNCTION_NAME(ex_dump_operands);
781 769
782 if (!ident) { 770 if (!opcode_name) {
783 ident = "?"; 771 opcode_name = "UNKNOWN";
784 }
785
786 if (!note) {
787 note = "?";
788 } 772 }
789 773
790 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 774 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
791 "************* Operand Stack Contents (Opcode [%s], %d Operands)\n", 775 "**** Start operand dump for opcode [%s], %d operands\n",
792 ident, num_levels)); 776 opcode_name, num_operands));
793 777
794 if (num_levels == 0) { 778 if (num_operands == 0) {
795 num_levels = 1; 779 num_operands = 1;
796 } 780 }
797 781
798 /* Dump the operand stack starting at the top */ 782 /* Dump the individual operands */
799 783
800 for (i = 0; num_levels > 0; i--, num_levels--) { 784 while (num_operands) {
801 acpi_ex_dump_operand(operands[i], 0); 785 acpi_ex_dump_operand(*operands, 0);
786 operands++;
787 num_operands--;
802 } 788 }
803 789
804 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 790 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
805 "************* Operand Stack dump from %s(%d), %s\n", 791 "**** End operand dump for [%s]\n", opcode_name));
806 module_name, line_number, note));
807 return; 792 return;
808} 793}
809 794
diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/executer/exfldio.c
index e336b5dc7a50..9ff9d1f4615d 100644
--- a/drivers/acpi/executer/exfldio.c
+++ b/drivers/acpi/executer/exfldio.c
@@ -153,14 +153,15 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
153 /* 153 /*
154 * Slack mode only: We will go ahead and allow access to this 154 * Slack mode only: We will go ahead and allow access to this
155 * field if it is within the region length rounded up to the next 155 * field if it is within the region length rounded up to the next
156 * access width boundary. 156 * access width boundary. acpi_size cast for 64-bit compile.
157 */ 157 */
158 if (ACPI_ROUND_UP(rgn_desc->region.length, 158 if (ACPI_ROUND_UP(rgn_desc->region.length,
159 obj_desc->common_field. 159 obj_desc->common_field.
160 access_byte_width) >= 160 access_byte_width) >=
161 (obj_desc->common_field.base_byte_offset + 161 ((acpi_size) obj_desc->common_field.
162 (acpi_native_uint) obj_desc->common_field. 162 base_byte_offset +
163 access_byte_width + field_datum_byte_offset)) { 163 obj_desc->common_field.access_byte_width +
164 field_datum_byte_offset)) {
164 return_ACPI_STATUS(AE_OK); 165 return_ACPI_STATUS(AE_OK);
165 } 166 }
166 } 167 }
diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/executer/exmisc.c
index cc956a5b5267..731414a581a6 100644
--- a/drivers/acpi/executer/exmisc.c
+++ b/drivers/acpi/executer/exmisc.c
@@ -329,8 +329,8 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
329 329
330 /* Result of two Strings is a String */ 330 /* Result of two Strings is a String */
331 331
332 return_desc = acpi_ut_create_string_object((acpi_size) 332 return_desc = acpi_ut_create_string_object(((acpi_size)
333 (operand0->string. 333 operand0->string.
334 length + 334 length +
335 local_operand1-> 335 local_operand1->
336 string.length)); 336 string.length));
@@ -352,8 +352,8 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
352 352
353 /* Result of two Buffers is a Buffer */ 353 /* Result of two Buffers is a Buffer */
354 354
355 return_desc = acpi_ut_create_buffer_object((acpi_size) 355 return_desc = acpi_ut_create_buffer_object(((acpi_size)
356 (operand0->buffer. 356 operand0->buffer.
357 length + 357 length +
358 local_operand1-> 358 local_operand1->
359 buffer.length)); 359 buffer.length));
diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/executer/exprep.c
index 3a2f8cd4c62a..5d438c32989d 100644
--- a/drivers/acpi/executer/exprep.c
+++ b/drivers/acpi/executer/exprep.c
@@ -503,11 +503,11 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
503 */ 503 */
504 second_desc = obj_desc->common.next_object; 504 second_desc = obj_desc->common.next_object;
505 second_desc->extra.aml_start = 505 second_desc->extra.aml_start =
506 ((union acpi_parse_object *)(info->data_register_node))-> 506 ACPI_CAST_PTR(union acpi_parse_object,
507 named.data; 507 info->data_register_node)->named.data;
508 second_desc->extra.aml_length = 508 second_desc->extra.aml_length =
509 ((union acpi_parse_object *)(info->data_register_node))-> 509 ACPI_CAST_PTR(union acpi_parse_object,
510 named.length; 510 info->data_register_node)->named.length;
511 511
512 break; 512 break;
513 513
diff --git a/drivers/acpi/executer/exregion.c b/drivers/acpi/executer/exregion.c
index 7cd8bb54fa01..7a41c409ae4d 100644
--- a/drivers/acpi/executer/exregion.c
+++ b/drivers/acpi/executer/exregion.c
@@ -156,7 +156,7 @@ acpi_ex_system_memory_space_handler(u32 function,
156 /* Create a new mapping starting at the address given */ 156 /* Create a new mapping starting at the address given */
157 157
158 mem_info->mapped_logical_address = 158 mem_info->mapped_logical_address =
159 acpi_os_map_memory((acpi_native_uint) address, window_size); 159 acpi_os_map_memory((acpi_physical_address) address, window_size);
160 if (!mem_info->mapped_logical_address) { 160 if (!mem_info->mapped_logical_address) {
161 ACPI_ERROR((AE_INFO, 161 ACPI_ERROR((AE_INFO,
162 "Could not map memory at %8.8X%8.8X, size %X", 162 "Could not map memory at %8.8X%8.8X, size %X",
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c
index 73e29e566a70..54085f16ec28 100644
--- a/drivers/acpi/executer/exresop.c
+++ b/drivers/acpi/executer/exresop.c
@@ -698,5 +698,9 @@ acpi_ex_resolve_operands(u16 opcode,
698 } 698 }
699 } 699 }
700 700
701 ACPI_DUMP_OPERANDS(walk_state->operands,
702 acpi_ps_get_opcode_name(opcode),
703 walk_state->num_operands);
704
701 return_ACPI_STATUS(status); 705 return_ACPI_STATUS(status);
702} 706}
diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/executer/exstore.c
index 76c875bc3154..38b55e352495 100644
--- a/drivers/acpi/executer/exstore.c
+++ b/drivers/acpi/executer/exstore.c
@@ -343,12 +343,6 @@ acpi_ex_store(union acpi_operand_object *source_desc,
343 acpi_ut_get_object_type_name(dest_desc), 343 acpi_ut_get_object_type_name(dest_desc),
344 dest_desc)); 344 dest_desc));
345 345
346 ACPI_DUMP_STACK_ENTRY(source_desc);
347 ACPI_DUMP_STACK_ENTRY(dest_desc);
348 ACPI_DUMP_OPERANDS(&dest_desc, ACPI_IMODE_EXECUTE, "ExStore",
349 2,
350 "Target is not a Reference or Constant object");
351
352 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 346 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
353 } 347 }
354 348
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 6cf10cbc1eee..55c17afbe669 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -148,7 +148,7 @@ acpi_fan_write_state(struct file *file, const char __user * buffer,
148 int result = 0; 148 int result = 0;
149 struct seq_file *m = file->private_data; 149 struct seq_file *m = file->private_data;
150 struct acpi_device *device = m->private; 150 struct acpi_device *device = m->private;
151 char state_string[12] = { '\0' }; 151 char state_string[3] = { '\0' };
152 152
153 if (count > sizeof(state_string) - 1) 153 if (count > sizeof(state_string) - 1)
154 return -EINVAL; 154 return -EINVAL;
@@ -157,6 +157,12 @@ acpi_fan_write_state(struct file *file, const char __user * buffer,
157 return -EFAULT; 157 return -EFAULT;
158 158
159 state_string[count] = '\0'; 159 state_string[count] = '\0';
160 if ((state_string[0] < '0') || (state_string[0] > '3'))
161 return -EINVAL;
162 if (state_string[1] == '\n')
163 state_string[1] = '\0';
164 if (state_string[1] != '\0')
165 return -EINVAL;
160 166
161 result = acpi_bus_set_power(device->handle, 167 result = acpi_bus_set_power(device->handle,
162 simple_strtoul(state_string, NULL, 0)); 168 simple_strtoul(state_string, NULL, 0));
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 9b227d4dc9c9..0f2dd81736bd 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -166,6 +166,8 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
166 "firmware_node"); 166 "firmware_node");
167 ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, 167 ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
168 "physical_node"); 168 "physical_node");
169 if (acpi_dev->wakeup.flags.valid)
170 device_set_wakeup_capable(dev, true);
169 } 171 }
170 172
171 return 0; 173 return 0;
@@ -336,6 +338,9 @@ static int __init acpi_rtc_init(void)
336 if (acpi_disabled) 338 if (acpi_disabled)
337 return 0; 339 return 0;
338 340
341 if (acpi_disabled)
342 return 0;
343
339 if (dev) { 344 if (dev) {
340 rtc_wake_setup(); 345 rtc_wake_setup();
341 rtc_info.wake_on = rtc_wake_on; 346 rtc_info.wake_on = rtc_wake_on;
diff --git a/drivers/acpi/hardware/hwgpe.c b/drivers/acpi/hardware/hwgpe.c
index 14bc4f456ae8..0b80db9d9197 100644
--- a/drivers/acpi/hardware/hwgpe.c
+++ b/drivers/acpi/hardware/hwgpe.c
@@ -55,6 +55,54 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
55 55
56/****************************************************************************** 56/******************************************************************************
57 * 57 *
58 * FUNCTION: acpi_hw_low_disable_gpe
59 *
60 * PARAMETERS: gpe_event_info - Info block for the GPE to be disabled
61 *
62 * RETURN: Status
63 *
64 * DESCRIPTION: Disable a single GPE in the enable register.
65 *
66 ******************************************************************************/
67
68acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
69{
70 struct acpi_gpe_register_info *gpe_register_info;
71 acpi_status status;
72 u32 enable_mask;
73
74 /* Get the info block for the entire GPE register */
75
76 gpe_register_info = gpe_event_info->register_info;
77 if (!gpe_register_info) {
78 return (AE_NOT_EXIST);
79 }
80
81 /* Get current value of the enable register that contains this GPE */
82
83 status = acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH, &enable_mask,
84 &gpe_register_info->enable_address);
85 if (ACPI_FAILURE(status)) {
86 return (status);
87 }
88
89 /* Clear just the bit that corresponds to this GPE */
90
91 ACPI_CLEAR_BIT(enable_mask,
92 ((u32) 1 <<
93 (gpe_event_info->gpe_number -
94 gpe_register_info->base_gpe_number)));
95
96 /* Write the updated enable mask */
97
98 status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, enable_mask,
99 &gpe_register_info->enable_address);
100
101 return (status);
102}
103
104/******************************************************************************
105 *
58 * FUNCTION: acpi_hw_write_gpe_enable_reg 106 * FUNCTION: acpi_hw_write_gpe_enable_reg
59 * 107 *
60 * PARAMETERS: gpe_event_info - Info block for the GPE to be enabled 108 * PARAMETERS: gpe_event_info - Info block for the GPE to be enabled
@@ -68,7 +116,7 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
68 ******************************************************************************/ 116 ******************************************************************************/
69 117
70acpi_status 118acpi_status
71acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info) 119acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info)
72{ 120{
73 struct acpi_gpe_register_info *gpe_register_info; 121 struct acpi_gpe_register_info *gpe_register_info;
74 acpi_status status; 122 acpi_status status;
@@ -138,7 +186,6 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
138 * 186 *
139 ******************************************************************************/ 187 ******************************************************************************/
140 188
141#ifdef ACPI_FUTURE_USAGE
142acpi_status 189acpi_status
143acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, 190acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
144 acpi_event_status * event_status) 191 acpi_event_status * event_status)
@@ -198,7 +245,6 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
198 unlock_and_exit: 245 unlock_and_exit:
199 return (status); 246 return (status);
200} 247}
201#endif /* ACPI_FUTURE_USAGE */
202 248
203/****************************************************************************** 249/******************************************************************************
204 * 250 *
diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c
index 5445751b8a3e..0ab22004728a 100644
--- a/drivers/acpi/namespace/nsdump.c
+++ b/drivers/acpi/namespace/nsdump.c
@@ -73,7 +73,7 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
73 73
74void acpi_ns_print_pathname(u32 num_segments, char *pathname) 74void acpi_ns_print_pathname(u32 num_segments, char *pathname)
75{ 75{
76 acpi_native_uint i; 76 u32 i;
77 77
78 ACPI_FUNCTION_NAME(ns_print_pathname); 78 ACPI_FUNCTION_NAME(ns_print_pathname);
79 79
@@ -515,12 +515,12 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
515 515
516 if (obj_type > ACPI_TYPE_LOCAL_MAX) { 516 if (obj_type > ACPI_TYPE_LOCAL_MAX) {
517 acpi_os_printf 517 acpi_os_printf
518 ("(Ptr to ACPI Object type %X [UNKNOWN])\n", 518 ("(Pointer to ACPI Object type %.2X [UNKNOWN])\n",
519 obj_type); 519 obj_type);
520 bytes_to_dump = 32; 520 bytes_to_dump = 32;
521 } else { 521 } else {
522 acpi_os_printf 522 acpi_os_printf
523 ("(Ptr to ACPI Object type %X [%s])\n", 523 ("(Pointer to ACPI Object type %.2X [%s])\n",
524 obj_type, acpi_ut_get_type_name(obj_type)); 524 obj_type, acpi_ut_get_type_name(obj_type));
525 bytes_to_dump = 525 bytes_to_dump =
526 sizeof(union acpi_operand_object); 526 sizeof(union acpi_operand_object);
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
index 14bdfa92bea0..d369164e00b0 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/namespace/nseval.c
@@ -138,6 +138,41 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
138 return_ACPI_STATUS(AE_NULL_OBJECT); 138 return_ACPI_STATUS(AE_NULL_OBJECT);
139 } 139 }
140 140
141 /*
142 * Calculate the number of arguments being passed to the method
143 */
144
145 info->param_count = 0;
146 if (info->parameters) {
147 while (info->parameters[info->param_count])
148 info->param_count++;
149 }
150
151 /* Error if too few arguments were passed in */
152
153 if (info->param_count < info->obj_desc->method.param_count) {
154 ACPI_ERROR((AE_INFO,
155 "Insufficient arguments - "
156 "method [%4.4s] needs %d, found %d",
157 acpi_ut_get_node_name(info->resolved_node),
158 info->obj_desc->method.param_count,
159 info->param_count));
160 return_ACPI_STATUS(AE_MISSING_ARGUMENTS);
161 }
162
163 /* Just a warning if too many arguments */
164
165 else if (info->param_count >
166 info->obj_desc->method.param_count) {
167 ACPI_WARNING((AE_INFO,
168 "Excess arguments - "
169 "method [%4.4s] needs %d, found %d",
170 acpi_ut_get_node_name(info->
171 resolved_node),
172 info->obj_desc->method.param_count,
173 info->param_count));
174 }
175
141 ACPI_DUMP_PATHNAME(info->resolved_node, "Execute Method:", 176 ACPI_DUMP_PATHNAME(info->resolved_node, "Execute Method:",
142 ACPI_LV_INFO, _COMPONENT); 177 ACPI_LV_INFO, _COMPONENT);
143 178
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c
index 6d6d930c8e18..e4c57510d798 100644
--- a/drivers/acpi/namespace/nsinit.c
+++ b/drivers/acpi/namespace/nsinit.c
@@ -542,7 +542,6 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
542 info->prefix_node = device_node; 542 info->prefix_node = device_node;
543 info->pathname = METHOD_NAME__INI; 543 info->pathname = METHOD_NAME__INI;
544 info->parameters = NULL; 544 info->parameters = NULL;
545 info->parameter_type = ACPI_PARAM_ARGS;
546 info->flags = ACPI_IGNORE_RETURN_VALUE; 545 info->flags = ACPI_IGNORE_RETURN_VALUE;
547 546
548 /* 547 /*
diff --git a/drivers/acpi/namespace/nsload.c b/drivers/acpi/namespace/nsload.c
index 2c92f6cf5ce1..a4a412b7c029 100644
--- a/drivers/acpi/namespace/nsload.c
+++ b/drivers/acpi/namespace/nsload.c
@@ -71,8 +71,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
71 ******************************************************************************/ 71 ******************************************************************************/
72 72
73acpi_status 73acpi_status
74acpi_ns_load_table(acpi_native_uint table_index, 74acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
75 struct acpi_namespace_node *node)
76{ 75{
77 acpi_status status; 76 acpi_status status;
78 77
diff --git a/drivers/acpi/namespace/nsparse.c b/drivers/acpi/namespace/nsparse.c
index 46a79b0103b6..a82271a9dbb3 100644
--- a/drivers/acpi/namespace/nsparse.c
+++ b/drivers/acpi/namespace/nsparse.c
@@ -63,13 +63,13 @@ ACPI_MODULE_NAME("nsparse")
63 * 63 *
64 ******************************************************************************/ 64 ******************************************************************************/
65acpi_status 65acpi_status
66acpi_ns_one_complete_parse(acpi_native_uint pass_number, 66acpi_ns_one_complete_parse(u32 pass_number,
67 acpi_native_uint table_index, 67 u32 table_index,
68 struct acpi_namespace_node * start_node) 68 struct acpi_namespace_node *start_node)
69{ 69{
70 union acpi_parse_object *parse_root; 70 union acpi_parse_object *parse_root;
71 acpi_status status; 71 acpi_status status;
72 acpi_native_uint aml_length; 72 u32 aml_length;
73 u8 *aml_start; 73 u8 *aml_start;
74 struct acpi_walk_state *walk_state; 74 struct acpi_walk_state *walk_state;
75 struct acpi_table_header *table; 75 struct acpi_table_header *table;
@@ -112,8 +112,8 @@ acpi_ns_one_complete_parse(acpi_native_uint pass_number,
112 aml_start = (u8 *) table + sizeof(struct acpi_table_header); 112 aml_start = (u8 *) table + sizeof(struct acpi_table_header);
113 aml_length = table->length - sizeof(struct acpi_table_header); 113 aml_length = table->length - sizeof(struct acpi_table_header);
114 status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL, 114 status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
115 aml_start, (u32) aml_length, 115 aml_start, aml_length, NULL,
116 NULL, (u8) pass_number); 116 (u8) pass_number);
117 } 117 }
118 118
119 if (ACPI_FAILURE(status)) { 119 if (ACPI_FAILURE(status)) {
@@ -158,8 +158,7 @@ acpi_ns_one_complete_parse(acpi_native_uint pass_number,
158 ******************************************************************************/ 158 ******************************************************************************/
159 159
160acpi_status 160acpi_status
161acpi_ns_parse_table(acpi_native_uint table_index, 161acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
162 struct acpi_namespace_node *start_node)
163{ 162{
164 acpi_status status; 163 acpi_status status;
165 164
diff --git a/drivers/acpi/namespace/nsutils.c b/drivers/acpi/namespace/nsutils.c
index 64c039843ed2..b0817e1127b1 100644
--- a/drivers/acpi/namespace/nsutils.c
+++ b/drivers/acpi/namespace/nsutils.c
@@ -73,9 +73,9 @@ acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
73 ******************************************************************************/ 73 ******************************************************************************/
74 74
75void 75void
76acpi_ns_report_error(char *module_name, 76acpi_ns_report_error(const char *module_name,
77 u32 line_number, 77 u32 line_number,
78 char *internal_name, acpi_status lookup_status) 78 const char *internal_name, acpi_status lookup_status)
79{ 79{
80 acpi_status status; 80 acpi_status status;
81 u32 bad_name; 81 u32 bad_name;
@@ -130,11 +130,11 @@ acpi_ns_report_error(char *module_name,
130 ******************************************************************************/ 130 ******************************************************************************/
131 131
132void 132void
133acpi_ns_report_method_error(char *module_name, 133acpi_ns_report_method_error(const char *module_name,
134 u32 line_number, 134 u32 line_number,
135 char *message, 135 const char *message,
136 struct acpi_namespace_node *prefix_node, 136 struct acpi_namespace_node *prefix_node,
137 char *path, acpi_status method_status) 137 const char *path, acpi_status method_status)
138{ 138{
139 acpi_status status; 139 acpi_status status;
140 struct acpi_namespace_node *node = prefix_node; 140 struct acpi_namespace_node *node = prefix_node;
@@ -167,7 +167,8 @@ acpi_ns_report_method_error(char *module_name,
167 ******************************************************************************/ 167 ******************************************************************************/
168 168
169void 169void
170acpi_ns_print_node_pathname(struct acpi_namespace_node *node, char *message) 170acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
171 const char *message)
171{ 172{
172 struct acpi_buffer buffer; 173 struct acpi_buffer buffer;
173 acpi_status status; 174 acpi_status status;
@@ -296,7 +297,7 @@ u32 acpi_ns_local(acpi_object_type type)
296 297
297void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info) 298void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
298{ 299{
299 char *next_external_char; 300 const char *next_external_char;
300 u32 i; 301 u32 i;
301 302
302 ACPI_FUNCTION_ENTRY(); 303 ACPI_FUNCTION_ENTRY();
@@ -363,9 +364,9 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
363{ 364{
364 u32 num_segments = info->num_segments; 365 u32 num_segments = info->num_segments;
365 char *internal_name = info->internal_name; 366 char *internal_name = info->internal_name;
366 char *external_name = info->next_external_char; 367 const char *external_name = info->next_external_char;
367 char *result = NULL; 368 char *result = NULL;
368 acpi_native_uint i; 369 u32 i;
369 370
370 ACPI_FUNCTION_TRACE(ns_build_internal_name); 371 ACPI_FUNCTION_TRACE(ns_build_internal_name);
371 372
@@ -400,12 +401,11 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
400 result = &internal_name[i]; 401 result = &internal_name[i];
401 } else if (num_segments == 2) { 402 } else if (num_segments == 2) {
402 internal_name[i] = AML_DUAL_NAME_PREFIX; 403 internal_name[i] = AML_DUAL_NAME_PREFIX;
403 result = &internal_name[(acpi_native_uint) (i + 1)]; 404 result = &internal_name[(acpi_size) i + 1];
404 } else { 405 } else {
405 internal_name[i] = AML_MULTI_NAME_PREFIX_OP; 406 internal_name[i] = AML_MULTI_NAME_PREFIX_OP;
406 internal_name[(acpi_native_uint) (i + 1)] = 407 internal_name[(acpi_size) i + 1] = (char)num_segments;
407 (char)num_segments; 408 result = &internal_name[(acpi_size) i + 2];
408 result = &internal_name[(acpi_native_uint) (i + 2)];
409 } 409 }
410 } 410 }
411 411
@@ -472,7 +472,8 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
472 * 472 *
473 *******************************************************************************/ 473 *******************************************************************************/
474 474
475acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name) 475acpi_status
476acpi_ns_internalize_name(const char *external_name, char **converted_name)
476{ 477{
477 char *internal_name; 478 char *internal_name;
478 struct acpi_namestring_info info; 479 struct acpi_namestring_info info;
@@ -528,15 +529,15 @@ acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name)
528 529
529acpi_status 530acpi_status
530acpi_ns_externalize_name(u32 internal_name_length, 531acpi_ns_externalize_name(u32 internal_name_length,
531 char *internal_name, 532 const char *internal_name,
532 u32 * converted_name_length, char **converted_name) 533 u32 * converted_name_length, char **converted_name)
533{ 534{
534 acpi_native_uint names_index = 0; 535 u32 names_index = 0;
535 acpi_native_uint num_segments = 0; 536 u32 num_segments = 0;
536 acpi_native_uint required_length; 537 u32 required_length;
537 acpi_native_uint prefix_length = 0; 538 u32 prefix_length = 0;
538 acpi_native_uint i = 0; 539 u32 i = 0;
539 acpi_native_uint j = 0; 540 u32 j = 0;
540 541
541 ACPI_FUNCTION_TRACE(ns_externalize_name); 542 ACPI_FUNCTION_TRACE(ns_externalize_name);
542 543
@@ -582,9 +583,8 @@ acpi_ns_externalize_name(u32 internal_name_length,
582 /* <count> 4-byte names */ 583 /* <count> 4-byte names */
583 584
584 names_index = prefix_length + 2; 585 names_index = prefix_length + 2;
585 num_segments = (acpi_native_uint) (u8) 586 num_segments = (u8)
586 internal_name[(acpi_native_uint) 587 internal_name[(acpi_size) prefix_length + 1];
587 (prefix_length + 1)];
588 break; 588 break;
589 589
590 case AML_DUAL_NAME_PREFIX: 590 case AML_DUAL_NAME_PREFIX:
@@ -823,7 +823,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
823 823
824acpi_status 824acpi_status
825acpi_ns_get_node(struct acpi_namespace_node *prefix_node, 825acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
826 char *pathname, 826 const char *pathname,
827 u32 flags, struct acpi_namespace_node **return_node) 827 u32 flags, struct acpi_namespace_node **return_node)
828{ 828{
829 union acpi_generic_state scope_info; 829 union acpi_generic_state scope_info;
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index a8d549187c84..38be5865d95d 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -182,7 +182,6 @@ acpi_evaluate_object(acpi_handle handle,
182 } 182 }
183 183
184 info->pathname = pathname; 184 info->pathname = pathname;
185 info->parameter_type = ACPI_PARAM_ARGS;
186 185
187 /* Convert and validate the device handle */ 186 /* Convert and validate the device handle */
188 187
@@ -442,7 +441,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
442 u32 flags; 441 u32 flags;
443 struct acpica_device_id hid; 442 struct acpica_device_id hid;
444 struct acpi_compatible_id_list *cid; 443 struct acpi_compatible_id_list *cid;
445 acpi_native_uint i; 444 u32 i;
446 int found; 445 int found;
447 446
448 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 447 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 658e5f3abae0..cb9864e39bae 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -120,10 +120,10 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
120 struct acpi_srat_mem_affinity *p = 120 struct acpi_srat_mem_affinity *p =
121 (struct acpi_srat_mem_affinity *)header; 121 (struct acpi_srat_mem_affinity *)header;
122 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 122 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
123 "SRAT Memory (0x%lx length 0x%lx type 0x%x) in proximity domain %d %s%s\n", 123 "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s\n",
124 (unsigned long)p->base_address, 124 (unsigned long)p->base_address,
125 (unsigned long)p->length, 125 (unsigned long)p->length,
126 p->memory_type, p->proximity_domain, 126 p->proximity_domain,
127 (p->flags & ACPI_SRAT_MEM_ENABLED)? 127 (p->flags & ACPI_SRAT_MEM_ENABLED)?
128 "enabled" : "disabled", 128 "enabled" : "disabled",
129 (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)? 129 (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)?
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/parser/psargs.c
index e94463778845..d830b29b85b1 100644
--- a/drivers/acpi/parser/psargs.c
+++ b/drivers/acpi/parser/psargs.c
@@ -76,7 +76,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
76{ 76{
77 u8 *aml = parser_state->aml; 77 u8 *aml = parser_state->aml;
78 u32 package_length = 0; 78 u32 package_length = 0;
79 acpi_native_uint byte_count; 79 u32 byte_count;
80 u8 byte_zero_mask = 0x3F; /* Default [0:5] */ 80 u8 byte_zero_mask = 0x3F; /* Default [0:5] */
81 81
82 ACPI_FUNCTION_TRACE(ps_get_next_package_length); 82 ACPI_FUNCTION_TRACE(ps_get_next_package_length);
@@ -86,7 +86,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
86 * used to encode the package length, either 0,1,2, or 3 86 * used to encode the package length, either 0,1,2, or 3
87 */ 87 */
88 byte_count = (aml[0] >> 6); 88 byte_count = (aml[0] >> 6);
89 parser_state->aml += (byte_count + 1); 89 parser_state->aml += ((acpi_size) byte_count + 1);
90 90
91 /* Get bytes 3, 2, 1 as needed */ 91 /* Get bytes 3, 2, 1 as needed */
92 92
diff --git a/drivers/acpi/parser/psxface.c b/drivers/acpi/parser/psxface.c
index 52581454c47c..270469aae842 100644
--- a/drivers/acpi/parser/psxface.c
+++ b/drivers/acpi/parser/psxface.c
@@ -333,9 +333,9 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
333static void 333static void
334acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action) 334acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action)
335{ 335{
336 acpi_native_uint i; 336 u32 i;
337 337
338 if ((info->parameter_type == ACPI_PARAM_ARGS) && (info->parameters)) { 338 if (info->parameters) {
339 339
340 /* Update reference count for each parameter */ 340 /* Update reference count for each parameter */
341 341
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 89022a74faee..11acaee14d66 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -162,7 +162,7 @@ do_prt_fixups(struct acpi_prt_entry *entry, struct acpi_pci_routing_table *prt)
162 !strcmp(prt->source, quirk->source) && 162 !strcmp(prt->source, quirk->source) &&
163 strlen(prt->source) >= strlen(quirk->actual_source)) { 163 strlen(prt->source) >= strlen(quirk->actual_source)) {
164 printk(KERN_WARNING PREFIX "firmware reports " 164 printk(KERN_WARNING PREFIX "firmware reports "
165 "%04x:%02x:%02x[%c] connected to %s; " 165 "%04x:%02x:%02x PCI INT %c connected to %s; "
166 "changing to %s\n", 166 "changing to %s\n",
167 entry->id.segment, entry->id.bus, 167 entry->id.segment, entry->id.bus,
168 entry->id.device, 'A' + entry->pin, 168 entry->id.device, 'A' + entry->pin,
@@ -429,7 +429,7 @@ acpi_pci_irq_derive(struct pci_dev *dev,
429{ 429{
430 struct pci_dev *bridge = dev; 430 struct pci_dev *bridge = dev;
431 int irq = -1; 431 int irq = -1;
432 u8 bridge_pin = 0; 432 u8 bridge_pin = 0, orig_pin = pin;
433 433
434 434
435 if (!dev) 435 if (!dev)
@@ -463,8 +463,8 @@ acpi_pci_irq_derive(struct pci_dev *dev,
463 } 463 }
464 464
465 if (irq < 0) { 465 if (irq < 0) {
466 printk(KERN_WARNING PREFIX "Unable to derive IRQ for device %s\n", 466 dev_warn(&dev->dev, "can't derive routing for PCI INT %c\n",
467 pci_name(dev)); 467 'A' + orig_pin);
468 return -1; 468 return -1;
469 } 469 }
470 470
@@ -487,6 +487,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
487 int triggering = ACPI_LEVEL_SENSITIVE; 487 int triggering = ACPI_LEVEL_SENSITIVE;
488 int polarity = ACPI_ACTIVE_LOW; 488 int polarity = ACPI_ACTIVE_LOW;
489 char *link = NULL; 489 char *link = NULL;
490 char link_desc[16];
490 int rc; 491 int rc;
491 492
492 493
@@ -503,7 +504,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
503 pin--; 504 pin--;
504 505
505 if (!dev->bus) { 506 if (!dev->bus) {
506 printk(KERN_ERR PREFIX "Invalid (NULL) 'bus' field\n"); 507 dev_err(&dev->dev, "invalid (NULL) 'bus' field\n");
507 return -ENODEV; 508 return -ENODEV;
508 } 509 }
509 510
@@ -538,8 +539,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
538 * driver reported one, then use it. Exit in any case. 539 * driver reported one, then use it. Exit in any case.
539 */ 540 */
540 if (irq < 0) { 541 if (irq < 0) {
541 printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI", 542 dev_warn(&dev->dev, "PCI INT %c: no GSI", 'A' + pin);
542 pci_name(dev), ('A' + pin));
543 /* Interrupt Line values above 0xF are forbidden */ 543 /* Interrupt Line values above 0xF are forbidden */
544 if (dev->irq > 0 && (dev->irq <= 0xF)) { 544 if (dev->irq > 0 && (dev->irq <= 0xF)) {
545 printk(" - using IRQ %d\n", dev->irq); 545 printk(" - using IRQ %d\n", dev->irq);
@@ -554,21 +554,21 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
554 554
555 rc = acpi_register_gsi(irq, triggering, polarity); 555 rc = acpi_register_gsi(irq, triggering, polarity);
556 if (rc < 0) { 556 if (rc < 0) {
557 printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: failed " 557 dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n",
558 "to register GSI\n", pci_name(dev), ('A' + pin)); 558 'A' + pin);
559 return rc; 559 return rc;
560 } 560 }
561 dev->irq = rc; 561 dev->irq = rc;
562 562
563 printk(KERN_INFO PREFIX "PCI Interrupt %s[%c] -> ",
564 pci_name(dev), 'A' + pin);
565
566 if (link) 563 if (link)
567 printk("Link [%s] -> ", link); 564 snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
565 else
566 link_desc[0] = '\0';
568 567
569 printk("GSI %u (%s, %s) -> IRQ %d\n", irq, 568 dev_info(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
570 (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge", 569 'A' + pin, link_desc, irq,
571 (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq); 570 (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
571 (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
572 572
573 return 0; 573 return 0;
574} 574}
@@ -616,10 +616,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
616 * (e.g. PCI_UNDEFINED_IRQ). 616 * (e.g. PCI_UNDEFINED_IRQ).
617 */ 617 */
618 618
619 printk(KERN_INFO PREFIX "PCI interrupt for device %s disabled\n", 619 dev_info(&dev->dev, "PCI INT %c disabled\n", 'A' + pin);
620 pci_name(dev));
621
622 acpi_unregister_gsi(gsi); 620 acpi_unregister_gsi(gsi);
623
624 return;
625} 621}
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
new file mode 100644
index 000000000000..b9ab030a52d5
--- /dev/null
+++ b/drivers/acpi/pci_slot.c
@@ -0,0 +1,368 @@
1/*
2 * pci_slot.c - ACPI PCI Slot Driver
3 *
4 * The code here is heavily leveraged from the acpiphp module.
5 * Thanks to Matthew Wilcox <matthew@wil.cx> for much guidance.
6 * Thanks to Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com> for code
7 * review and fixes.
8 *
9 * Copyright (C) 2007 Alex Chiang <achiang@hp.com>
10 * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms and conditions of the GNU General Public License,
14 * version 2, as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with this program; if not, write to the Free Software Foundation, Inc.,
23 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/types.h>
30#include <linux/pci.h>
31#include <linux/acpi.h>
32#include <acpi/acpi_bus.h>
33#include <acpi/acpi_drivers.h>
34
35static int debug;
36static int check_sta_before_sun;
37
38#define DRIVER_VERSION "0.1"
39#define DRIVER_AUTHOR "Alex Chiang <achiang@hp.com>"
40#define DRIVER_DESC "ACPI PCI Slot Detection Driver"
41MODULE_AUTHOR(DRIVER_AUTHOR);
42MODULE_DESCRIPTION(DRIVER_DESC);
43MODULE_LICENSE("GPL");
44MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
45module_param(debug, bool, 0644);
46
47#define _COMPONENT ACPI_PCI_COMPONENT
48ACPI_MODULE_NAME("pci_slot");
49
50#define MY_NAME "pci_slot"
51#define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg)
52#define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg)
53#define dbg(format, arg...) \
54 do { \
55 if (debug) \
56 printk(KERN_DEBUG "%s: " format, \
57 MY_NAME , ## arg); \
58 } while (0)
59
60#define SLOT_NAME_SIZE 20 /* Inspired by #define in acpiphp.h */
61
/* One entry per registered slot; lives on slot_list under slot_list_lock */
struct acpi_pci_slot {
	acpi_handle root_handle;	/* handle of the root bridge */
	struct pci_slot *pci_slot;	/* corresponding pci_slot */
	struct list_head list;		/* node in the list of slots */
};
67
68static int acpi_pci_slot_add(acpi_handle handle);
69static void acpi_pci_slot_remove(acpi_handle handle);
70
71static LIST_HEAD(slot_list);
72static DEFINE_MUTEX(slot_list_lock);
73static struct acpi_pci_driver acpi_pci_slot_driver = {
74 .add = acpi_pci_slot_add,
75 .remove = acpi_pci_slot_remove,
76};
77
78static int
79check_slot(acpi_handle handle, int *device, unsigned long *sun)
80{
81 int retval = 0;
82 unsigned long adr, sta;
83 acpi_status status;
84 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
85
86 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
87 dbg("Checking slot on path: %s\n", (char *)buffer.pointer);
88
89 if (check_sta_before_sun) {
90 /* If SxFy doesn't have _STA, we just assume it's there */
91 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
92 if (ACPI_SUCCESS(status) && !(sta & ACPI_STA_DEVICE_PRESENT)) {
93 retval = -1;
94 goto out;
95 }
96 }
97
98 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
99 if (ACPI_FAILURE(status)) {
100 dbg("_ADR returned %d on %s\n", status, (char *)buffer.pointer);
101 retval = -1;
102 goto out;
103 }
104
105 *device = (adr >> 16) & 0xffff;
106
107 /* No _SUN == not a slot == bail */
108 status = acpi_evaluate_integer(handle, "_SUN", NULL, sun);
109 if (ACPI_FAILURE(status)) {
110 dbg("_SUN returned %d on %s\n", status, (char *)buffer.pointer);
111 retval = -1;
112 goto out;
113 }
114
115out:
116 kfree(buffer.pointer);
117 return retval;
118}
119
120struct callback_args {
121 acpi_walk_callback user_function; /* only for walk_p2p_bridge */
122 struct pci_bus *pci_bus;
123 acpi_handle root_handle;
124};
125
126/*
127 * register_slot
128 *
129 * Called once for each SxFy object in the namespace. Don't worry about
130 * calling pci_create_slot multiple times for the same pci_bus:device,
131 * since each subsequent call simply bumps the refcount on the pci_slot.
132 *
133 * The number of calls to pci_destroy_slot from unregister_slot is
134 * symmetrical.
135 */
136static acpi_status
137register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
138{
139 int device;
140 unsigned long sun;
141 char name[SLOT_NAME_SIZE];
142 struct acpi_pci_slot *slot;
143 struct pci_slot *pci_slot;
144 struct callback_args *parent_context = context;
145 struct pci_bus *pci_bus = parent_context->pci_bus;
146
147 if (check_slot(handle, &device, &sun))
148 return AE_OK;
149
150 slot = kmalloc(sizeof(*slot), GFP_KERNEL);
151 if (!slot) {
152 err("%s: cannot allocate memory\n", __func__);
153 return AE_OK;
154 }
155
156 snprintf(name, sizeof(name), "%u", (u32)sun);
157 pci_slot = pci_create_slot(pci_bus, device, name);
158 if (IS_ERR(pci_slot)) {
159 err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
160 kfree(slot);
161 }
162
163 slot->root_handle = parent_context->root_handle;
164 slot->pci_slot = pci_slot;
165 INIT_LIST_HEAD(&slot->list);
166 mutex_lock(&slot_list_lock);
167 list_add(&slot->list, &slot_list);
168 mutex_unlock(&slot_list_lock);
169
170 dbg("pci_slot: %p, pci_bus: %x, device: %d, name: %s\n",
171 pci_slot, pci_bus->number, device, name);
172
173 return AE_OK;
174}
175
/*
 * walk_p2p_bridge - discover and walk p2p bridges
 * @handle: points to an acpi_pci_root
 * @context: p2p_bridge_context pointer
 *
 * Note that when we call ourselves recursively, we pass a different
 * value of pci_bus in the child_context.
 */
static acpi_status
walk_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
{
	int device, function;
	unsigned long adr;
	acpi_status status;
	acpi_handle dummy_handle;
	acpi_walk_callback user_function;

	struct pci_dev *dev;
	struct pci_bus *pci_bus;
	struct callback_args child_context;
	struct callback_args *parent_context = context;

	pci_bus = parent_context->pci_bus;
	user_function = parent_context->user_function;

	/* Objects without _ADR cannot be PCI functions - nothing to do */
	status = acpi_get_handle(handle, "_ADR", &dummy_handle);
	if (ACPI_FAILURE(status))
		return AE_OK;

	status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
	if (ACPI_FAILURE(status))
		return AE_OK;

	/* _ADR encodes device in the high word, function in the low word */
	device = (adr >> 16) & 0xffff;
	function = adr & 0xffff;

	/* Only bridges (devices with a subordinate bus) need descending */
	dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function));
	if (!dev || !dev->subordinate)
		goto out;

	/* Child context carries the subordinate bus for correct slot names */
	child_context.pci_bus = dev->subordinate;
	child_context.user_function = user_function;
	child_context.root_handle = parent_context->root_handle;

	dbg("p2p bridge walk, pci_bus = %x\n", dev->subordinate->number);
	/* First register slots directly below this bridge... */
	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
				     user_function, &child_context, NULL);
	if (ACPI_FAILURE(status))
		goto out;

	/* ...then recurse into any deeper p2p bridges */
	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
				     walk_p2p_bridge, &child_context, NULL);
out:
	/* pci_dev_put(NULL) is a no-op, so this is safe on the !dev path */
	pci_dev_put(dev);
	return AE_OK;
}
232
/*
 * walk_root_bridge - generic root bridge walker
 * @handle: points to an acpi_pci_root
 * @user_function: user callback for slot objects
 *
 * Call user_function for all objects underneath this root bridge.
 * Walk p2p bridges underneath us and call user_function on those too.
 *
 * Returns 0 when the bridge is absent/unusable or the walk succeeds,
 * otherwise the failing acpi_status.
 */
static int
walk_root_bridge(acpi_handle handle, acpi_walk_callback user_function)
{
	int seg, bus;
	unsigned long tmp;
	acpi_status status;
	acpi_handle dummy_handle;
	struct pci_bus *pci_bus;
	struct callback_args context;

	/* If the bridge doesn't have _STA, we assume it is always there */
	status = acpi_get_handle(handle, "_STA", &dummy_handle);
	if (ACPI_SUCCESS(status)) {
		status = acpi_evaluate_integer(handle, "_STA", NULL, &tmp);
		if (ACPI_FAILURE(status)) {
			info("%s: _STA evaluation failure\n", __func__);
			return 0;
		}
		if ((tmp & ACPI_STA_DEVICE_FUNCTIONING) == 0)
			/* don't register this object */
			return 0;
	}

	/* Segment and bus number default to 0 when _SEG/_BBN are absent */
	status = acpi_evaluate_integer(handle, "_SEG", NULL, &tmp);
	seg = ACPI_SUCCESS(status) ? tmp : 0;

	status = acpi_evaluate_integer(handle, "_BBN", NULL, &tmp);
	bus = ACPI_SUCCESS(status) ? tmp : 0;

	pci_bus = pci_find_bus(seg, bus);
	if (!pci_bus)
		return 0;

	context.pci_bus = pci_bus;
	context.user_function = user_function;
	context.root_handle = handle;

	dbg("root bridge walk, pci_bus = %x\n", pci_bus->number);
	/* Pass 1: objects directly below the root bridge */
	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
				     user_function, &context, NULL);
	if (ACPI_FAILURE(status))
		return status;

	/* Pass 2: descend into p2p bridges below the root bridge */
	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
				     walk_p2p_bridge, &context, NULL);
	if (ACPI_FAILURE(status))
		err("%s: walk_p2p_bridge failure - %d\n", __func__, status);

	return status;
}
291
292/*
293 * acpi_pci_slot_add
294 * @handle: points to an acpi_pci_root
295 */
296static int
297acpi_pci_slot_add(acpi_handle handle)
298{
299 acpi_status status;
300
301 status = walk_root_bridge(handle, register_slot);
302 if (ACPI_FAILURE(status))
303 err("%s: register_slot failure - %d\n", __func__, status);
304
305 return status;
306}
307
308/*
309 * acpi_pci_slot_remove
310 * @handle: points to an acpi_pci_root
311 */
312static void
313acpi_pci_slot_remove(acpi_handle handle)
314{
315 struct acpi_pci_slot *slot, *tmp;
316
317 mutex_lock(&slot_list_lock);
318 list_for_each_entry_safe(slot, tmp, &slot_list, list) {
319 if (slot->root_handle == handle) {
320 list_del(&slot->list);
321 pci_destroy_slot(slot->pci_slot);
322 kfree(slot);
323 }
324 }
325 mutex_unlock(&slot_list_lock);
326}
327
328static int do_sta_before_sun(const struct dmi_system_id *d)
329{
330 info("%s detected: will evaluate _STA before calling _SUN\n", d->ident);
331 check_sta_before_sun = 1;
332 return 0;
333}
334
static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
	/*
	 * Fujitsu Primequest machines will return 1023 to indicate an
	 * error if the _SUN method is evaluated on SxFy objects that
	 * are not present (as indicated by _STA), so for those machines,
	 * we want to check _STA before evaluating _SUN.
	 */
	{
		.callback = do_sta_before_sun,
		.ident = "Fujitsu PRIMEQUEST",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "FUJITSU LIMITED"),
			DMI_MATCH(DMI_BIOS_VERSION, "PRIMEQUEST"),
		},
	},
	{}	/* terminating empty entry */
};
352
353static int __init
354acpi_pci_slot_init(void)
355{
356 dmi_check_system(acpi_pci_slot_dmi_table);
357 acpi_pci_register_driver(&acpi_pci_slot_driver);
358 return 0;
359}
360
361static void __exit
362acpi_pci_slot_exit(void)
363{
364 acpi_pci_unregister_driver(&acpi_pci_slot_driver);
365}
366
367module_init(acpi_pci_slot_init);
368module_exit(acpi_pci_slot_exit);
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 81e4f081a4ae..4ab21cb1c8c7 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -292,69 +292,135 @@ static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
292 return 0; 292 return 0;
293} 293}
294 294
295/**
296 * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
297 * ACPI 3.0) _PSW (Power State Wake)
298 * @dev: Device to handle.
299 * @enable: 0 - disable, 1 - enable the wake capabilities of the device.
300 * @sleep_state: Target sleep state of the system.
301 * @dev_state: Target power state of the device.
302 *
303 * Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
304 * State Wake) for the device, if present. On failure reset the device's
305 * wakeup.flags.valid flag.
306 *
307 * RETURN VALUE:
308 * 0 if either _DSW or _PSW has been successfully executed
309 * 0 if neither _DSW nor _PSW has been found
310 * -ENODEV if the execution of either _DSW or _PSW has failed
311 */
312int acpi_device_sleep_wake(struct acpi_device *dev,
313 int enable, int sleep_state, int dev_state)
314{
315 union acpi_object in_arg[3];
316 struct acpi_object_list arg_list = { 3, in_arg };
317 acpi_status status = AE_OK;
318
319 /*
320 * Try to execute _DSW first.
321 *
322 * Three agruments are needed for the _DSW object:
323 * Argument 0: enable/disable the wake capabilities
324 * Argument 1: target system state
325 * Argument 2: target device state
326 * When _DSW object is called to disable the wake capabilities, maybe
327 * the first argument is filled. The values of the other two agruments
328 * are meaningless.
329 */
330 in_arg[0].type = ACPI_TYPE_INTEGER;
331 in_arg[0].integer.value = enable;
332 in_arg[1].type = ACPI_TYPE_INTEGER;
333 in_arg[1].integer.value = sleep_state;
334 in_arg[2].type = ACPI_TYPE_INTEGER;
335 in_arg[2].integer.value = dev_state;
336 status = acpi_evaluate_object(dev->handle, "_DSW", &arg_list, NULL);
337 if (ACPI_SUCCESS(status)) {
338 return 0;
339 } else if (status != AE_NOT_FOUND) {
340 printk(KERN_ERR PREFIX "_DSW execution failed\n");
341 dev->wakeup.flags.valid = 0;
342 return -ENODEV;
343 }
344
345 /* Execute _PSW */
346 arg_list.count = 1;
347 in_arg[0].integer.value = enable;
348 status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL);
349 if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
350 printk(KERN_ERR PREFIX "_PSW execution failed\n");
351 dev->wakeup.flags.valid = 0;
352 return -ENODEV;
353 }
354
355 return 0;
356}
357
295/* 358/*
296 * Prepare a wakeup device, two steps (Ref ACPI 2.0:P229): 359 * Prepare a wakeup device, two steps (Ref ACPI 2.0:P229):
297 * 1. Power on the power resources required for the wakeup device 360 * 1. Power on the power resources required for the wakeup device
298 * 2. Enable _PSW (power state wake) for the device if present 361 * 2. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
362 * State Wake) for the device, if present
299 */ 363 */
300int acpi_enable_wakeup_device_power(struct acpi_device *dev) 364int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
301{ 365{
302 union acpi_object arg = { ACPI_TYPE_INTEGER }; 366 int i, err;
303 struct acpi_object_list arg_list = { 1, &arg };
304 acpi_status status = AE_OK;
305 int i;
306 int ret = 0;
307 367
308 if (!dev || !dev->wakeup.flags.valid) 368 if (!dev || !dev->wakeup.flags.valid)
309 return -1; 369 return -EINVAL;
370
371 /*
372 * Do not execute the code below twice in a row without calling
373 * acpi_disable_wakeup_device_power() in between for the same device
374 */
375 if (dev->wakeup.flags.prepared)
376 return 0;
310 377
311 arg.integer.value = 1;
312 /* Open power resource */ 378 /* Open power resource */
313 for (i = 0; i < dev->wakeup.resources.count; i++) { 379 for (i = 0; i < dev->wakeup.resources.count; i++) {
314 ret = acpi_power_on(dev->wakeup.resources.handles[i], dev); 380 int ret = acpi_power_on(dev->wakeup.resources.handles[i], dev);
315 if (ret) { 381 if (ret) {
316 printk(KERN_ERR PREFIX "Transition power state\n"); 382 printk(KERN_ERR PREFIX "Transition power state\n");
317 dev->wakeup.flags.valid = 0; 383 dev->wakeup.flags.valid = 0;
318 return -1; 384 return -ENODEV;
319 } 385 }
320 } 386 }
321 387
322 /* Execute PSW */ 388 /*
323 status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL); 389 * Passing 3 as the third argument below means the device may be placed
324 if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { 390 * in arbitrary power state afterwards.
325 printk(KERN_ERR PREFIX "Evaluate _PSW\n"); 391 */
326 dev->wakeup.flags.valid = 0; 392 err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
327 ret = -1; 393 if (!err)
328 } 394 dev->wakeup.flags.prepared = 1;
329 395
330 return ret; 396 return err;
331} 397}
332 398
333/* 399/*
334 * Shutdown a wakeup device, counterpart of above method 400 * Shutdown a wakeup device, counterpart of above method
335 * 1. Disable _PSW (power state wake) 401 * 1. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
402 * State Wake) for the device, if present
336 * 2. Shutdown down the power resources 403 * 2. Shutdown down the power resources
337 */ 404 */
338int acpi_disable_wakeup_device_power(struct acpi_device *dev) 405int acpi_disable_wakeup_device_power(struct acpi_device *dev)
339{ 406{
340 union acpi_object arg = { ACPI_TYPE_INTEGER }; 407 int i, ret;
341 struct acpi_object_list arg_list = { 1, &arg };
342 acpi_status status = AE_OK;
343 int i;
344 int ret = 0;
345
346 408
347 if (!dev || !dev->wakeup.flags.valid) 409 if (!dev || !dev->wakeup.flags.valid)
348 return -1; 410 return -EINVAL;
349 411
350 arg.integer.value = 0; 412 /*
351 /* Execute PSW */ 413 * Do not execute the code below twice in a row without calling
352 status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL); 414 * acpi_enable_wakeup_device_power() in between for the same device
353 if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { 415 */
354 printk(KERN_ERR PREFIX "Evaluate _PSW\n"); 416 if (!dev->wakeup.flags.prepared)
355 dev->wakeup.flags.valid = 0; 417 return 0;
356 return -1; 418
357 } 419 dev->wakeup.flags.prepared = 0;
420
421 ret = acpi_device_sleep_wake(dev, 0, 0, 0);
422 if (ret)
423 return ret;
358 424
359 /* Close power resource */ 425 /* Close power resource */
360 for (i = 0; i < dev->wakeup.resources.count; i++) { 426 for (i = 0; i < dev->wakeup.resources.count; i++) {
@@ -362,7 +428,7 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
362 if (ret) { 428 if (ret) {
363 printk(KERN_ERR PREFIX "Transition power state\n"); 429 printk(KERN_ERR PREFIX "Transition power state\n");
364 dev->wakeup.flags.valid = 0; 430 dev->wakeup.flags.valid = 0;
365 return -1; 431 return -ENODEV;
366 } 432 }
367 } 433 }
368 434
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 9dd0fa93b9e1..ec0f2d581ece 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -118,8 +118,31 @@ static const struct file_operations acpi_processor_info_fops = {
118 .release = single_release, 118 .release = single_release,
119}; 119};
120 120
121struct acpi_processor *processors[NR_CPUS]; 121DEFINE_PER_CPU(struct acpi_processor *, processors);
122struct acpi_processor_errata errata __read_mostly; 122struct acpi_processor_errata errata __read_mostly;
123static int set_no_mwait(const struct dmi_system_id *id)
124{
125 printk(KERN_NOTICE PREFIX "%s detected - "
126 "disable mwait for CPU C-stetes\n", id->ident);
127 idle_nomwait = 1;
128 return 0;
129}
130
131static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
132 {
133 set_no_mwait, "IFL91 board", {
134 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
135 DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
136 DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
137 DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
138 {
139 set_no_mwait, "Extensa 5220", {
140 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
141 DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
142 DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
143 DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
144 {},
145};
123 146
124/* -------------------------------------------------------------------------- 147/* --------------------------------------------------------------------------
125 Errata Handling 148 Errata Handling
@@ -265,7 +288,20 @@ static int acpi_processor_set_pdc(struct acpi_processor *pr)
265 288
266 if (!pdc_in) 289 if (!pdc_in)
267 return status; 290 return status;
291 if (idle_nomwait) {
292 /*
293 * If mwait is disabled for CPU C-states, the C2C3_FFH access
294 * mode will be disabled in the parameter of _PDC object.
295 * Of course C1_FFH access mode will also be disabled.
296 */
297 union acpi_object *obj;
298 u32 *buffer = NULL;
268 299
300 obj = pdc_in->pointer;
301 buffer = (u32 *)(obj->buffer.pointer);
302 buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
303
304 }
269 status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL); 305 status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);
270 306
271 if (ACPI_FAILURE(status)) 307 if (ACPI_FAILURE(status))
@@ -614,14 +650,14 @@ static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
614 return 0; 650 return 0;
615} 651}
616 652
617static void *processor_device_array[NR_CPUS]; 653static DEFINE_PER_CPU(void *, processor_device_array);
618 654
619static int __cpuinit acpi_processor_start(struct acpi_device *device) 655static int __cpuinit acpi_processor_start(struct acpi_device *device)
620{ 656{
621 int result = 0; 657 int result = 0;
622 acpi_status status = AE_OK; 658 acpi_status status = AE_OK;
623 struct acpi_processor *pr; 659 struct acpi_processor *pr;
624 660 struct sys_device *sysdev;
625 661
626 pr = acpi_driver_data(device); 662 pr = acpi_driver_data(device);
627 663
@@ -638,20 +674,24 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device)
638 * ACPI id of processors can be reported wrongly by the BIOS. 674 * ACPI id of processors can be reported wrongly by the BIOS.
639 * Don't trust it blindly 675 * Don't trust it blindly
640 */ 676 */
641 if (processor_device_array[pr->id] != NULL && 677 if (per_cpu(processor_device_array, pr->id) != NULL &&
642 processor_device_array[pr->id] != device) { 678 per_cpu(processor_device_array, pr->id) != device) {
643 printk(KERN_WARNING "BIOS reported wrong ACPI id " 679 printk(KERN_WARNING "BIOS reported wrong ACPI id "
644 "for the processor\n"); 680 "for the processor\n");
645 return -ENODEV; 681 return -ENODEV;
646 } 682 }
647 processor_device_array[pr->id] = device; 683 per_cpu(processor_device_array, pr->id) = device;
648 684
649 processors[pr->id] = pr; 685 per_cpu(processors, pr->id) = pr;
650 686
651 result = acpi_processor_add_fs(device); 687 result = acpi_processor_add_fs(device);
652 if (result) 688 if (result)
653 goto end; 689 goto end;
654 690
691 sysdev = get_cpu_sysdev(pr->id);
692 if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev"))
693 return -EFAULT;
694
655 status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY, 695 status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
656 acpi_processor_notify, pr); 696 acpi_processor_notify, pr);
657 697
@@ -749,7 +789,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
749 unsigned long action, void *hcpu) 789 unsigned long action, void *hcpu)
750{ 790{
751 unsigned int cpu = (unsigned long)hcpu; 791 unsigned int cpu = (unsigned long)hcpu;
752 struct acpi_processor *pr = processors[cpu]; 792 struct acpi_processor *pr = per_cpu(processors, cpu);
753 793
754 if (action == CPU_ONLINE && pr) { 794 if (action == CPU_ONLINE && pr) {
755 acpi_processor_ppc_has_changed(pr); 795 acpi_processor_ppc_has_changed(pr);
@@ -810,6 +850,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
810 status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY, 850 status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
811 acpi_processor_notify); 851 acpi_processor_notify);
812 852
853 sysfs_remove_link(&device->dev.kobj, "sysdev");
854
813 acpi_processor_remove_fs(device); 855 acpi_processor_remove_fs(device);
814 856
815 if (pr->cdev) { 857 if (pr->cdev) {
@@ -819,8 +861,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
819 pr->cdev = NULL; 861 pr->cdev = NULL;
820 } 862 }
821 863
822 processors[pr->id] = NULL; 864 per_cpu(processors, pr->id) = NULL;
823 processor_device_array[pr->id] = NULL; 865 per_cpu(processor_device_array, pr->id) = NULL;
824 kfree(pr); 866 kfree(pr);
825 867
826 return 0; 868 return 0;
@@ -1014,9 +1056,9 @@ static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
1014 1056
1015static int acpi_processor_handle_eject(struct acpi_processor *pr) 1057static int acpi_processor_handle_eject(struct acpi_processor *pr)
1016{ 1058{
1017 if (cpu_online(pr->id)) { 1059 if (cpu_online(pr->id))
1018 return (-EINVAL); 1060 cpu_down(pr->id);
1019 } 1061
1020 arch_unregister_cpu(pr->id); 1062 arch_unregister_cpu(pr->id);
1021 acpi_unmap_lsapic(pr->id); 1063 acpi_unmap_lsapic(pr->id);
1022 return (0); 1064 return (0);
@@ -1068,8 +1110,6 @@ static int __init acpi_processor_init(void)
1068{ 1110{
1069 int result = 0; 1111 int result = 0;
1070 1112
1071
1072 memset(&processors, 0, sizeof(processors));
1073 memset(&errata, 0, sizeof(errata)); 1113 memset(&errata, 0, sizeof(errata));
1074 1114
1075#ifdef CONFIG_SMP 1115#ifdef CONFIG_SMP
@@ -1083,6 +1123,11 @@ static int __init acpi_processor_init(void)
1083 return -ENOMEM; 1123 return -ENOMEM;
1084 acpi_processor_dir->owner = THIS_MODULE; 1124 acpi_processor_dir->owner = THIS_MODULE;
1085 1125
1126 /*
1127 * Check whether the system is DMI table. If yes, OSPM
1128 * should not use mwait for CPU-states.
1129 */
1130 dmi_check_system(processor_idle_dmi_table);
1086 result = cpuidle_register_driver(&acpi_idle_driver); 1131 result = cpuidle_register_driver(&acpi_idle_driver);
1087 if (result < 0) 1132 if (result < 0)
1088 goto out_proc; 1133 goto out_proc;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 4976e5db2b3f..d592dbb1d12a 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -41,6 +41,7 @@
41#include <linux/pm_qos_params.h> 41#include <linux/pm_qos_params.h>
42#include <linux/clockchips.h> 42#include <linux/clockchips.h>
43#include <linux/cpuidle.h> 43#include <linux/cpuidle.h>
44#include <linux/cpuidle.h>
44 45
45/* 46/*
46 * Include the apic definitions for x86 to have the APIC timer related defines 47 * Include the apic definitions for x86 to have the APIC timer related defines
@@ -57,6 +58,7 @@
57 58
58#include <acpi/acpi_bus.h> 59#include <acpi/acpi_bus.h>
59#include <acpi/processor.h> 60#include <acpi/processor.h>
61#include <asm/processor.h>
60 62
61#define ACPI_PROCESSOR_COMPONENT 0x01000000 63#define ACPI_PROCESSOR_COMPONENT 0x01000000
62#define ACPI_PROCESSOR_CLASS "processor" 64#define ACPI_PROCESSOR_CLASS "processor"
@@ -401,7 +403,7 @@ static void acpi_processor_idle(void)
401 */ 403 */
402 local_irq_disable(); 404 local_irq_disable();
403 405
404 pr = processors[smp_processor_id()]; 406 pr = __get_cpu_var(processors);
405 if (!pr) { 407 if (!pr) {
406 local_irq_enable(); 408 local_irq_enable();
407 return; 409 return;
@@ -955,6 +957,21 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
955 } else { 957 } else {
956 continue; 958 continue;
957 } 959 }
960 if (cx.type == ACPI_STATE_C1 &&
961 (idle_halt || idle_nomwait)) {
962 /*
963 * In most cases the C1 space_id obtained from
964 * _CST object is FIXED_HARDWARE access mode.
965 * But when the option of idle=halt is added,
966 * the entry_method type should be changed from
967 * CSTATE_FFH to CSTATE_HALT.
968 * When the option of idle=nomwait is added,
969 * the C1 entry_method type should be
970 * CSTATE_HALT.
971 */
972 cx.entry_method = ACPI_CSTATE_HALT;
973 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
974 }
958 } else { 975 } else {
959 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", 976 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
960 cx.address); 977 cx.address);
@@ -1431,7 +1448,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
1431 struct acpi_processor *pr; 1448 struct acpi_processor *pr;
1432 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 1449 struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
1433 1450
1434 pr = processors[smp_processor_id()]; 1451 pr = __get_cpu_var(processors);
1435 1452
1436 if (unlikely(!pr)) 1453 if (unlikely(!pr))
1437 return 0; 1454 return 0;
@@ -1471,7 +1488,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
1471 u32 t1, t2; 1488 u32 t1, t2;
1472 int sleep_ticks = 0; 1489 int sleep_ticks = 0;
1473 1490
1474 pr = processors[smp_processor_id()]; 1491 pr = __get_cpu_var(processors);
1475 1492
1476 if (unlikely(!pr)) 1493 if (unlikely(!pr))
1477 return 0; 1494 return 0;
@@ -1549,7 +1566,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1549 u32 t1, t2; 1566 u32 t1, t2;
1550 int sleep_ticks = 0; 1567 int sleep_ticks = 0;
1551 1568
1552 pr = processors[smp_processor_id()]; 1569 pr = __get_cpu_var(processors);
1553 1570
1554 if (unlikely(!pr)) 1571 if (unlikely(!pr))
1555 return 0; 1572 return 0;
@@ -1780,6 +1797,15 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1780 return 0; 1797 return 0;
1781 1798
1782 if (!first_run) { 1799 if (!first_run) {
1800 if (idle_halt) {
1801 /*
1802 * When the boot option of "idle=halt" is added, halt
1803 * is used for CPU IDLE.
1804 * In such case C2/C3 is meaningless. So the max_cstate
1805 * is set to one.
1806 */
1807 max_cstate = 1;
1808 }
1783 dmi_check_system(processor_power_dmi_table); 1809 dmi_check_system(processor_power_dmi_table);
1784 max_cstate = acpi_processor_cstate_check(max_cstate); 1810 max_cstate = acpi_processor_cstate_check(max_cstate);
1785 if (max_cstate < ACPI_C_STATES_MAX) 1811 if (max_cstate < ACPI_C_STATES_MAX)
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index d80b2d1441af..b4749969c6b4 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -89,7 +89,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
89 if (event != CPUFREQ_INCOMPATIBLE) 89 if (event != CPUFREQ_INCOMPATIBLE)
90 goto out; 90 goto out;
91 91
92 pr = processors[policy->cpu]; 92 pr = per_cpu(processors, policy->cpu);
93 if (!pr || !pr->performance) 93 if (!pr || !pr->performance)
94 goto out; 94 goto out;
95 95
@@ -572,7 +572,7 @@ int acpi_processor_preregister_performance(
572 572
573 /* Call _PSD for all CPUs */ 573 /* Call _PSD for all CPUs */
574 for_each_possible_cpu(i) { 574 for_each_possible_cpu(i) {
575 pr = processors[i]; 575 pr = per_cpu(processors, i);
576 if (!pr) { 576 if (!pr) {
577 /* Look only at processors in ACPI namespace */ 577 /* Look only at processors in ACPI namespace */
578 continue; 578 continue;
@@ -603,7 +603,7 @@ int acpi_processor_preregister_performance(
603 * domain info. 603 * domain info.
604 */ 604 */
605 for_each_possible_cpu(i) { 605 for_each_possible_cpu(i) {
606 pr = processors[i]; 606 pr = per_cpu(processors, i);
607 if (!pr) 607 if (!pr)
608 continue; 608 continue;
609 609
@@ -624,7 +624,7 @@ int acpi_processor_preregister_performance(
624 624
625 cpus_clear(covered_cpus); 625 cpus_clear(covered_cpus);
626 for_each_possible_cpu(i) { 626 for_each_possible_cpu(i) {
627 pr = processors[i]; 627 pr = per_cpu(processors, i);
628 if (!pr) 628 if (!pr)
629 continue; 629 continue;
630 630
@@ -651,7 +651,7 @@ int acpi_processor_preregister_performance(
651 if (i == j) 651 if (i == j)
652 continue; 652 continue;
653 653
654 match_pr = processors[j]; 654 match_pr = per_cpu(processors, j);
655 if (!match_pr) 655 if (!match_pr)
656 continue; 656 continue;
657 657
@@ -680,7 +680,7 @@ int acpi_processor_preregister_performance(
680 if (i == j) 680 if (i == j)
681 continue; 681 continue;
682 682
683 match_pr = processors[j]; 683 match_pr = per_cpu(processors, j);
684 if (!match_pr) 684 if (!match_pr)
685 continue; 685 continue;
686 686
@@ -697,7 +697,7 @@ int acpi_processor_preregister_performance(
697 697
698err_ret: 698err_ret:
699 for_each_possible_cpu(i) { 699 for_each_possible_cpu(i) {
700 pr = processors[i]; 700 pr = per_cpu(processors, i);
701 if (!pr || !pr->performance) 701 if (!pr || !pr->performance)
702 continue; 702 continue;
703 703
@@ -728,7 +728,7 @@ acpi_processor_register_performance(struct acpi_processor_performance
728 728
729 mutex_lock(&performance_mutex); 729 mutex_lock(&performance_mutex);
730 730
731 pr = processors[cpu]; 731 pr = per_cpu(processors, cpu);
732 if (!pr) { 732 if (!pr) {
733 mutex_unlock(&performance_mutex); 733 mutex_unlock(&performance_mutex);
734 return -ENODEV; 734 return -ENODEV;
@@ -766,7 +766,7 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
766 766
767 mutex_lock(&performance_mutex); 767 mutex_lock(&performance_mutex);
768 768
769 pr = processors[cpu]; 769 pr = per_cpu(processors, cpu);
770 if (!pr) { 770 if (!pr) {
771 mutex_unlock(&performance_mutex); 771 mutex_unlock(&performance_mutex);
772 return; 772 return;
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 28509fbba6f9..a56fc6c4394b 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -71,7 +71,7 @@ static int acpi_processor_update_tsd_coord(void)
71 * coordination between all CPUs. 71 * coordination between all CPUs.
72 */ 72 */
73 for_each_possible_cpu(i) { 73 for_each_possible_cpu(i) {
74 pr = processors[i]; 74 pr = per_cpu(processors, i);
75 if (!pr) 75 if (!pr)
76 continue; 76 continue;
77 77
@@ -93,7 +93,7 @@ static int acpi_processor_update_tsd_coord(void)
93 93
94 cpus_clear(covered_cpus); 94 cpus_clear(covered_cpus);
95 for_each_possible_cpu(i) { 95 for_each_possible_cpu(i) {
96 pr = processors[i]; 96 pr = per_cpu(processors, i);
97 if (!pr) 97 if (!pr)
98 continue; 98 continue;
99 99
@@ -119,7 +119,7 @@ static int acpi_processor_update_tsd_coord(void)
119 if (i == j) 119 if (i == j)
120 continue; 120 continue;
121 121
122 match_pr = processors[j]; 122 match_pr = per_cpu(processors, j);
123 if (!match_pr) 123 if (!match_pr)
124 continue; 124 continue;
125 125
@@ -152,7 +152,7 @@ static int acpi_processor_update_tsd_coord(void)
152 if (i == j) 152 if (i == j)
153 continue; 153 continue;
154 154
155 match_pr = processors[j]; 155 match_pr = per_cpu(processors, j);
156 if (!match_pr) 156 if (!match_pr)
157 continue; 157 continue;
158 158
@@ -172,7 +172,7 @@ static int acpi_processor_update_tsd_coord(void)
172 172
173err_ret: 173err_ret:
174 for_each_possible_cpu(i) { 174 for_each_possible_cpu(i) {
175 pr = processors[i]; 175 pr = per_cpu(processors, i);
176 if (!pr) 176 if (!pr)
177 continue; 177 continue;
178 178
@@ -214,7 +214,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
214 struct acpi_processor_throttling *p_throttling; 214 struct acpi_processor_throttling *p_throttling;
215 215
216 cpu = p_tstate->cpu; 216 cpu = p_tstate->cpu;
217 pr = processors[cpu]; 217 pr = per_cpu(processors, cpu);
218 if (!pr) { 218 if (!pr) {
219 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n")); 219 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
220 return 0; 220 return 0;
@@ -1035,7 +1035,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1035 * cpus. 1035 * cpus.
1036 */ 1036 */
1037 for_each_cpu_mask_nr(i, online_throttling_cpus) { 1037 for_each_cpu_mask_nr(i, online_throttling_cpus) {
1038 match_pr = processors[i]; 1038 match_pr = per_cpu(processors, i);
1039 /* 1039 /*
1040 * If the pointer is invalid, we will report the 1040 * If the pointer is invalid, we will report the
1041 * error message and continue. 1041 * error message and continue.
@@ -1232,7 +1232,10 @@ static ssize_t acpi_processor_write_throttling(struct file *file,
1232 int result = 0; 1232 int result = 0;
1233 struct seq_file *m = file->private_data; 1233 struct seq_file *m = file->private_data;
1234 struct acpi_processor *pr = m->private; 1234 struct acpi_processor *pr = m->private;
1235 char state_string[12] = { '\0' }; 1235 char state_string[5] = "";
1236 char *charp = NULL;
1237 size_t state_val = 0;
1238 char tmpbuf[5] = "";
1236 1239
1237 if (!pr || (count > sizeof(state_string) - 1)) 1240 if (!pr || (count > sizeof(state_string) - 1))
1238 return -EINVAL; 1241 return -EINVAL;
@@ -1241,10 +1244,23 @@ static ssize_t acpi_processor_write_throttling(struct file *file,
1241 return -EFAULT; 1244 return -EFAULT;
1242 1245
1243 state_string[count] = '\0'; 1246 state_string[count] = '\0';
1247 if ((count > 0) && (state_string[count-1] == '\n'))
1248 state_string[count-1] = '\0';
1244 1249
1245 result = acpi_processor_set_throttling(pr, 1250 charp = state_string;
1246 simple_strtoul(state_string, 1251 if ((state_string[0] == 't') || (state_string[0] == 'T'))
1247 NULL, 0)); 1252 charp++;
1253
1254 state_val = simple_strtoul(charp, NULL, 0);
1255 if (state_val >= pr->throttling.state_count)
1256 return -EINVAL;
1257
1258 snprintf(tmpbuf, 5, "%zu", state_val);
1259
1260 if (strcmp(tmpbuf, charp) != 0)
1261 return -EINVAL;
1262
1263 result = acpi_processor_set_throttling(pr, state_val);
1248 if (result) 1264 if (result)
1249 return result; 1265 return result;
1250 1266
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
new file mode 100644
index 000000000000..a6b662c00b67
--- /dev/null
+++ b/drivers/acpi/reboot.c
@@ -0,0 +1,50 @@
1
2#include <linux/pci.h>
3#include <linux/acpi.h>
4#include <acpi/reboot.h>
5
6void acpi_reboot(void)
7{
8 struct acpi_generic_address *rr;
9 struct pci_bus *bus0;
10 u8 reset_value;
11 unsigned int devfn;
12
13 if (acpi_disabled)
14 return;
15
16 rr = &acpi_gbl_FADT.reset_register;
17
18 /* Is the reset register supported? */
19 if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) ||
20 rr->bit_width != 8 || rr->bit_offset != 0)
21 return;
22
23 reset_value = acpi_gbl_FADT.reset_value;
24
25 /* The reset register can only exist in I/O, Memory or PCI config space
26 * on a device on bus 0. */
27 switch (rr->space_id) {
28 case ACPI_ADR_SPACE_PCI_CONFIG:
29 /* The reset register can only live on bus 0. */
30 bus0 = pci_find_bus(0, 0);
31 if (!bus0)
32 return;
33 /* Form PCI device/function pair. */
34 devfn = PCI_DEVFN((rr->address >> 32) & 0xffff,
35 (rr->address >> 16) & 0xffff);
36 printk(KERN_DEBUG "Resetting with ACPI PCI RESET_REG.");
37 /* Write the value that resets us. */
38 pci_bus_write_config_byte(bus0, devfn,
39 (rr->address & 0xffff), reset_value);
40 break;
41
42 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
43 case ACPI_ADR_SPACE_SYSTEM_IO:
44 printk(KERN_DEBUG "ACPI MEMORY or I/O RESET_REG.\n");
45 acpi_hw_low_level_write(8, reset_value, rr);
46 break;
47 }
48 /* Wait ten seconds */
49 acpi_os_stall(10000000);
50}
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index 8a112d11d491..f61ebc679e66 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -73,7 +73,7 @@ acpi_rs_stream_option_length(u32 resource_length, u32 minimum_total_length);
73 73
74static u8 acpi_rs_count_set_bits(u16 bit_field) 74static u8 acpi_rs_count_set_bits(u16 bit_field)
75{ 75{
76 acpi_native_uint bits_set; 76 u8 bits_set;
77 77
78 ACPI_FUNCTION_ENTRY(); 78 ACPI_FUNCTION_ENTRY();
79 79
@@ -84,7 +84,7 @@ static u8 acpi_rs_count_set_bits(u16 bit_field)
84 bit_field &= (u16) (bit_field - 1); 84 bit_field &= (u16) (bit_field - 1);
85 } 85 }
86 86
87 return ((u8) bits_set); 87 return bits_set;
88} 88}
89 89
90/******************************************************************************* 90/*******************************************************************************
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
index faddaee1bc07..7804a8c40e7a 100644
--- a/drivers/acpi/resources/rscreate.c
+++ b/drivers/acpi/resources/rscreate.c
@@ -181,9 +181,9 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
181 } 181 }
182 182
183 /* 183 /*
184 * Loop through the ACPI_INTERNAL_OBJECTS - Each object 184 * Loop through the ACPI_INTERNAL_OBJECTS - Each object should be a
185 * should be a package that in turn contains an 185 * package that in turn contains an acpi_integer Address, a u8 Pin,
186 * acpi_integer Address, a u8 Pin, a Name and a u8 source_index. 186 * a Name, and a u8 source_index.
187 */ 187 */
188 top_object_list = package_object->package.elements; 188 top_object_list = package_object->package.elements;
189 number_of_elements = package_object->package.count; 189 number_of_elements = package_object->package.count;
@@ -240,9 +240,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
240 /* 1) First subobject: Dereference the PRT.Address */ 240 /* 1) First subobject: Dereference the PRT.Address */
241 241
242 obj_desc = sub_object_list[0]; 242 obj_desc = sub_object_list[0];
243 if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) { 243 if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) {
244 user_prt->address = obj_desc->integer.value;
245 } else {
246 ACPI_ERROR((AE_INFO, 244 ACPI_ERROR((AE_INFO,
247 "(PRT[%X].Address) Need Integer, found %s", 245 "(PRT[%X].Address) Need Integer, found %s",
248 index, 246 index,
@@ -250,12 +248,12 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
250 return_ACPI_STATUS(AE_BAD_DATA); 248 return_ACPI_STATUS(AE_BAD_DATA);
251 } 249 }
252 250
251 user_prt->address = obj_desc->integer.value;
252
253 /* 2) Second subobject: Dereference the PRT.Pin */ 253 /* 2) Second subobject: Dereference the PRT.Pin */
254 254
255 obj_desc = sub_object_list[1]; 255 obj_desc = sub_object_list[1];
256 if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) { 256 if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) {
257 user_prt->pin = (u32) obj_desc->integer.value;
258 } else {
259 ACPI_ERROR((AE_INFO, 257 ACPI_ERROR((AE_INFO,
260 "(PRT[%X].Pin) Need Integer, found %s", 258 "(PRT[%X].Pin) Need Integer, found %s",
261 index, 259 index,
@@ -284,6 +282,25 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
284 } 282 }
285 } 283 }
286 284
285 user_prt->pin = (u32) obj_desc->integer.value;
286
287 /*
288 * If the BIOS has erroneously reversed the _PRT source_name (index 2)
289 * and the source_index (index 3), fix it. _PRT is important enough to
290 * workaround this BIOS error. This also provides compatibility with
291 * other ACPI implementations.
292 */
293 obj_desc = sub_object_list[3];
294 if (!obj_desc
295 || (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER)) {
296 sub_object_list[3] = sub_object_list[2];
297 sub_object_list[2] = obj_desc;
298
299 ACPI_WARNING((AE_INFO,
300 "(PRT[%X].Source) SourceName and SourceIndex are reversed, fixed",
301 index));
302 }
303
287 /* 304 /*
288 * 3) Third subobject: Dereference the PRT.source_name 305 * 3) Third subobject: Dereference the PRT.source_name
289 * The name may be unresolved (slack mode), so allow a null object 306 * The name may be unresolved (slack mode), so allow a null object
@@ -364,9 +381,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
364 /* 4) Fourth subobject: Dereference the PRT.source_index */ 381 /* 4) Fourth subobject: Dereference the PRT.source_index */
365 382
366 obj_desc = sub_object_list[source_index_index]; 383 obj_desc = sub_object_list[source_index_index];
367 if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) { 384 if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) {
368 user_prt->source_index = (u32) obj_desc->integer.value;
369 } else {
370 ACPI_ERROR((AE_INFO, 385 ACPI_ERROR((AE_INFO,
371 "(PRT[%X].SourceIndex) Need Integer, found %s", 386 "(PRT[%X].SourceIndex) Need Integer, found %s",
372 index, 387 index,
@@ -374,6 +389,8 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
374 return_ACPI_STATUS(AE_BAD_DATA); 389 return_ACPI_STATUS(AE_BAD_DATA);
375 } 390 }
376 391
392 user_prt->source_index = (u32) obj_desc->integer.value;
393
377 /* Point to the next union acpi_operand_object in the top level package */ 394 /* Point to the next union acpi_operand_object in the top level package */
378 395
379 top_object_list++; 396 top_object_list++;
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c
index de1ac3881b22..96a6c0353255 100644
--- a/drivers/acpi/resources/rsmisc.c
+++ b/drivers/acpi/resources/rsmisc.c
@@ -82,7 +82,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
82 82
83 ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource); 83 ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
84 84
85 if (((acpi_native_uint) resource) & 0x3) { 85 if (((acpi_size) resource) & 0x3) {
86 86
87 /* Each internal resource struct is expected to be 32-bit aligned */ 87 /* Each internal resource struct is expected to be 32-bit aligned */
88 88
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c
index befe2302f41b..f7b3bcd59ba7 100644
--- a/drivers/acpi/resources/rsutils.c
+++ b/drivers/acpi/resources/rsutils.c
@@ -62,7 +62,7 @@ ACPI_MODULE_NAME("rsutils")
62 ******************************************************************************/ 62 ******************************************************************************/
63u8 acpi_rs_decode_bitmask(u16 mask, u8 * list) 63u8 acpi_rs_decode_bitmask(u16 mask, u8 * list)
64{ 64{
65 acpi_native_uint i; 65 u8 i;
66 u8 bit_count; 66 u8 bit_count;
67 67
68 ACPI_FUNCTION_ENTRY(); 68 ACPI_FUNCTION_ENTRY();
@@ -71,7 +71,7 @@ u8 acpi_rs_decode_bitmask(u16 mask, u8 * list)
71 71
72 for (i = 0, bit_count = 0; mask; i++) { 72 for (i = 0, bit_count = 0; mask; i++) {
73 if (mask & 0x0001) { 73 if (mask & 0x0001) {
74 list[bit_count] = (u8) i; 74 list[bit_count] = i;
75 bit_count++; 75 bit_count++;
76 } 76 }
77 77
@@ -96,8 +96,8 @@ u8 acpi_rs_decode_bitmask(u16 mask, u8 * list)
96 96
97u16 acpi_rs_encode_bitmask(u8 * list, u8 count) 97u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
98{ 98{
99 acpi_native_uint i; 99 u32 i;
100 acpi_native_uint mask; 100 u16 mask;
101 101
102 ACPI_FUNCTION_ENTRY(); 102 ACPI_FUNCTION_ENTRY();
103 103
@@ -107,7 +107,7 @@ u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
107 mask |= (0x1 << list[i]); 107 mask |= (0x1 << list[i]);
108 } 108 }
109 109
110 return ((u16) mask); 110 return mask;
111} 111}
112 112
113/******************************************************************************* 113/*******************************************************************************
@@ -130,7 +130,7 @@ u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
130void 130void
131acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type) 131acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
132{ 132{
133 acpi_native_uint i; 133 u32 i;
134 134
135 ACPI_FUNCTION_ENTRY(); 135 ACPI_FUNCTION_ENTRY();
136 136
@@ -679,7 +679,6 @@ acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
679 info->prefix_node = node; 679 info->prefix_node = node;
680 info->pathname = METHOD_NAME__SRS; 680 info->pathname = METHOD_NAME__SRS;
681 info->parameters = args; 681 info->parameters = args;
682 info->parameter_type = ACPI_PARAM_ARGS;
683 info->flags = ACPI_IGNORE_RETURN_VALUE; 682 info->flags = ACPI_IGNORE_RETURN_VALUE;
684 683
685 /* 684 /*
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 6d85289f1c12..f3132aa47a69 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -6,6 +6,8 @@
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/acpi.h> 8#include <linux/acpi.h>
9#include <linux/signal.h>
10#include <linux/kthread.h>
9 11
10#include <acpi/acpi_drivers.h> 12#include <acpi/acpi_drivers.h>
11#include <acpi/acinterp.h> /* for acpi_ex_eisa_id_to_string() */ 13#include <acpi/acinterp.h> /* for acpi_ex_eisa_id_to_string() */
@@ -92,17 +94,37 @@ acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, cha
92} 94}
93static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); 95static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
94 96
95static int acpi_eject_operation(acpi_handle handle, int lockable) 97static int acpi_bus_hot_remove_device(void *context)
96{ 98{
99 struct acpi_device *device;
100 acpi_handle handle = context;
97 struct acpi_object_list arg_list; 101 struct acpi_object_list arg_list;
98 union acpi_object arg; 102 union acpi_object arg;
99 acpi_status status = AE_OK; 103 acpi_status status = AE_OK;
100 104
101 /* 105 if (acpi_bus_get_device(handle, &device))
102 * TBD: evaluate _PS3? 106 return 0;
103 */ 107
108 if (!device)
109 return 0;
110
111 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
112 "Hot-removing device %s...\n", device->dev.bus_id));
113
104 114
105 if (lockable) { 115 if (acpi_bus_trim(device, 1)) {
116 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
117 "Removing device failed\n"));
118 return -1;
119 }
120
121 /* power off device */
122 status = acpi_evaluate_object(handle, "_PS3", NULL, NULL);
123 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
124 ACPI_DEBUG_PRINT((ACPI_DB_WARN,
125 "Power-off device failed\n"));
126
127 if (device->flags.lockable) {
106 arg_list.count = 1; 128 arg_list.count = 1;
107 arg_list.pointer = &arg; 129 arg_list.pointer = &arg;
108 arg.type = ACPI_TYPE_INTEGER; 130 arg.type = ACPI_TYPE_INTEGER;
@@ -118,26 +140,22 @@ static int acpi_eject_operation(acpi_handle handle, int lockable)
118 /* 140 /*
119 * TBD: _EJD support. 141 * TBD: _EJD support.
120 */ 142 */
121
122 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); 143 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
123 if (ACPI_FAILURE(status)) { 144 if (ACPI_FAILURE(status))
124 return (-ENODEV); 145 return -ENODEV;
125 }
126 146
127 return (0); 147 return 0;
128} 148}
129 149
130static ssize_t 150static ssize_t
131acpi_eject_store(struct device *d, struct device_attribute *attr, 151acpi_eject_store(struct device *d, struct device_attribute *attr,
132 const char *buf, size_t count) 152 const char *buf, size_t count)
133{ 153{
134 int result;
135 int ret = count; 154 int ret = count;
136 int islockable;
137 acpi_status status; 155 acpi_status status;
138 acpi_handle handle;
139 acpi_object_type type = 0; 156 acpi_object_type type = 0;
140 struct acpi_device *acpi_device = to_acpi_device(d); 157 struct acpi_device *acpi_device = to_acpi_device(d);
158 struct task_struct *task;
141 159
142 if ((!count) || (buf[0] != '1')) { 160 if ((!count) || (buf[0] != '1')) {
143 return -EINVAL; 161 return -EINVAL;
@@ -154,18 +172,12 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
154 goto err; 172 goto err;
155 } 173 }
156 174
157 islockable = acpi_device->flags.lockable; 175 /* remove the device in another thread to fix the deadlock issue */
158 handle = acpi_device->handle; 176 task = kthread_run(acpi_bus_hot_remove_device,
159 177 acpi_device->handle, "acpi_hot_remove_device");
160 result = acpi_bus_trim(acpi_device, 1); 178 if (IS_ERR(task))
161 179 ret = PTR_ERR(task);
162 if (!result) 180err:
163 result = acpi_eject_operation(handle, islockable);
164
165 if (result) {
166 ret = -EBUSY;
167 }
168 err:
169 return ret; 181 return ret;
170} 182}
171 183
@@ -691,9 +703,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
691 acpi_status status = 0; 703 acpi_status status = 0;
692 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 704 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
693 union acpi_object *package = NULL; 705 union acpi_object *package = NULL;
694 union acpi_object in_arg[3]; 706 int psw_error;
695 struct acpi_object_list arg_list = { 3, in_arg };
696 acpi_status psw_status = AE_OK;
697 707
698 struct acpi_device_id button_device_ids[] = { 708 struct acpi_device_id button_device_ids[] = {
699 {"PNP0C0D", 0}, 709 {"PNP0C0D", 0},
@@ -725,39 +735,11 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
725 * So it is necessary to call _DSW object first. Only when it is not 735 * So it is necessary to call _DSW object first. Only when it is not
726 * present will the _PSW object used. 736 * present will the _PSW object used.
727 */ 737 */
728 /* 738 psw_error = acpi_device_sleep_wake(device, 0, 0, 0);
729 * Three agruments are needed for the _DSW object. 739 if (psw_error)
730 * Argument 0: enable/disable the wake capabilities 740 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
731 * When _DSW object is called to disable the wake capabilities, maybe 741 "error in _DSW or _PSW evaluation\n"));
732 * the first argument is filled. The value of the other two agruments 742
733 * is meaningless.
734 */
735 in_arg[0].type = ACPI_TYPE_INTEGER;
736 in_arg[0].integer.value = 0;
737 in_arg[1].type = ACPI_TYPE_INTEGER;
738 in_arg[1].integer.value = 0;
739 in_arg[2].type = ACPI_TYPE_INTEGER;
740 in_arg[2].integer.value = 0;
741 psw_status = acpi_evaluate_object(device->handle, "_DSW",
742 &arg_list, NULL);
743 if (ACPI_FAILURE(psw_status) && (psw_status != AE_NOT_FOUND))
744 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in evaluate _DSW\n"));
745 /*
746 * When the _DSW object is not present, OSPM will call _PSW object.
747 */
748 if (psw_status == AE_NOT_FOUND) {
749 /*
750 * Only one agruments is required for the _PSW object.
751 * agrument 0: enable/disable the wake capabilities
752 */
753 arg_list.count = 1;
754 in_arg[0].integer.value = 0;
755 psw_status = acpi_evaluate_object(device->handle, "_PSW",
756 &arg_list, NULL);
757 if (ACPI_FAILURE(psw_status) && (psw_status != AE_NOT_FOUND))
758 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in "
759 "evaluate _PSW\n"));
760 }
761 /* Power button, Lid switch always enable wakeup */ 743 /* Power button, Lid switch always enable wakeup */
762 if (!acpi_match_device_ids(device, button_device_ids)) 744 if (!acpi_match_device_ids(device, button_device_ids))
763 device->wakeup.flags.run_wake = 1; 745 device->wakeup.flags.run_wake = 1;
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 495c63a3e0af..0489a7d1d42c 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -24,10 +24,6 @@
24 24
25u8 sleep_states[ACPI_S_STATE_COUNT]; 25u8 sleep_states[ACPI_S_STATE_COUNT];
26 26
27#ifdef CONFIG_PM_SLEEP
28static u32 acpi_target_sleep_state = ACPI_STATE_S0;
29#endif
30
31static int acpi_sleep_prepare(u32 acpi_state) 27static int acpi_sleep_prepare(u32 acpi_state)
32{ 28{
33#ifdef CONFIG_ACPI_SLEEP 29#ifdef CONFIG_ACPI_SLEEP
@@ -49,9 +45,96 @@ static int acpi_sleep_prepare(u32 acpi_state)
49 return 0; 45 return 0;
50} 46}
51 47
52#ifdef CONFIG_SUSPEND 48#ifdef CONFIG_PM_SLEEP
53static struct platform_suspend_ops acpi_suspend_ops; 49static u32 acpi_target_sleep_state = ACPI_STATE_S0;
50
51/*
52 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
53 * user to request that behavior by using the 'acpi_old_suspend_ordering'
54 * kernel command line option that causes the following variable to be set.
55 */
56static bool old_suspend_ordering;
54 57
58void __init acpi_old_suspend_ordering(void)
59{
60 old_suspend_ordering = true;
61}
62
63/**
64 * acpi_pm_disable_gpes - Disable the GPEs.
65 */
66static int acpi_pm_disable_gpes(void)
67{
68 acpi_hw_disable_all_gpes();
69 return 0;
70}
71
72/**
73 * __acpi_pm_prepare - Prepare the platform to enter the target state.
74 *
75 * If necessary, set the firmware waking vector and do arch-specific
76 * nastiness to get the wakeup code to the waking vector.
77 */
78static int __acpi_pm_prepare(void)
79{
80 int error = acpi_sleep_prepare(acpi_target_sleep_state);
81
82 if (error)
83 acpi_target_sleep_state = ACPI_STATE_S0;
84 return error;
85}
86
87/**
88 * acpi_pm_prepare - Prepare the platform to enter the target sleep
89 * state and disable the GPEs.
90 */
91static int acpi_pm_prepare(void)
92{
93 int error = __acpi_pm_prepare();
94
95 if (!error)
96 acpi_hw_disable_all_gpes();
97 return error;
98}
99
100/**
101 * acpi_pm_finish - Instruct the platform to leave a sleep state.
102 *
103 * This is called after we wake back up (or if entering the sleep state
104 * failed).
105 */
106static void acpi_pm_finish(void)
107{
108 u32 acpi_state = acpi_target_sleep_state;
109
110 if (acpi_state == ACPI_STATE_S0)
111 return;
112
113 printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n",
114 acpi_state);
115 acpi_disable_wakeup_device(acpi_state);
116 acpi_leave_sleep_state(acpi_state);
117
118 /* reset firmware waking vector */
119 acpi_set_firmware_waking_vector((acpi_physical_address) 0);
120
121 acpi_target_sleep_state = ACPI_STATE_S0;
122}
123
124/**
125 * acpi_pm_end - Finish up suspend sequence.
126 */
127static void acpi_pm_end(void)
128{
129 /*
130 * This is necessary in case acpi_pm_finish() is not called during a
131 * failing transition to a sleep state.
132 */
133 acpi_target_sleep_state = ACPI_STATE_S0;
134}
135#endif /* CONFIG_PM_SLEEP */
136
137#ifdef CONFIG_SUSPEND
55extern void do_suspend_lowlevel(void); 138extern void do_suspend_lowlevel(void);
56 139
57static u32 acpi_suspend_states[] = { 140static u32 acpi_suspend_states[] = {
@@ -61,13 +144,10 @@ static u32 acpi_suspend_states[] = {
61 [PM_SUSPEND_MAX] = ACPI_STATE_S5 144 [PM_SUSPEND_MAX] = ACPI_STATE_S5
62}; 145};
63 146
64static int init_8259A_after_S1;
65
66/** 147/**
67 * acpi_suspend_begin - Set the target system sleep state to the state 148 * acpi_suspend_begin - Set the target system sleep state to the state
68 * associated with given @pm_state, if supported. 149 * associated with given @pm_state, if supported.
69 */ 150 */
70
71static int acpi_suspend_begin(suspend_state_t pm_state) 151static int acpi_suspend_begin(suspend_state_t pm_state)
72{ 152{
73 u32 acpi_state = acpi_suspend_states[pm_state]; 153 u32 acpi_state = acpi_suspend_states[pm_state];
@@ -84,25 +164,6 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
84} 164}
85 165
86/** 166/**
87 * acpi_suspend_prepare - Do preliminary suspend work.
88 *
89 * If necessary, set the firmware waking vector and do arch-specific
90 * nastiness to get the wakeup code to the waking vector.
91 */
92
93static int acpi_suspend_prepare(void)
94{
95 int error = acpi_sleep_prepare(acpi_target_sleep_state);
96
97 if (error) {
98 acpi_target_sleep_state = ACPI_STATE_S0;
99 return error;
100 }
101
102 return ACPI_SUCCESS(acpi_hw_disable_all_gpes()) ? 0 : -EFAULT;
103}
104
105/**
106 * acpi_suspend_enter - Actually enter a sleep state. 167 * acpi_suspend_enter - Actually enter a sleep state.
107 * @pm_state: ignored 168 * @pm_state: ignored
108 * 169 *
@@ -110,7 +171,6 @@ static int acpi_suspend_prepare(void)
110 * assembly, which in turn call acpi_enter_sleep_state(). 171 * assembly, which in turn call acpi_enter_sleep_state().
111 * It's unfortunate, but it works. Please fix if you're feeling frisky. 172 * It's unfortunate, but it works. Please fix if you're feeling frisky.
112 */ 173 */
113
114static int acpi_suspend_enter(suspend_state_t pm_state) 174static int acpi_suspend_enter(suspend_state_t pm_state)
115{ 175{
116 acpi_status status = AE_OK; 176 acpi_status status = AE_OK;
@@ -167,46 +227,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
167 return ACPI_SUCCESS(status) ? 0 : -EFAULT; 227 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
168} 228}
169 229
170/**
171 * acpi_suspend_finish - Instruct the platform to leave a sleep state.
172 *
173 * This is called after we wake back up (or if entering the sleep state
174 * failed).
175 */
176
177static void acpi_suspend_finish(void)
178{
179 u32 acpi_state = acpi_target_sleep_state;
180
181 acpi_disable_wakeup_device(acpi_state);
182 acpi_leave_sleep_state(acpi_state);
183
184 /* reset firmware waking vector */
185 acpi_set_firmware_waking_vector((acpi_physical_address) 0);
186
187 acpi_target_sleep_state = ACPI_STATE_S0;
188
189#ifdef CONFIG_X86
190 if (init_8259A_after_S1) {
191 printk("Broken toshiba laptop -> kicking interrupts\n");
192 init_8259A(0);
193 }
194#endif
195}
196
197/**
198 * acpi_suspend_end - Finish up suspend sequence.
199 */
200
201static void acpi_suspend_end(void)
202{
203 /*
204 * This is necessary in case acpi_suspend_finish() is not called during a
205 * failing transition to a sleep state.
206 */
207 acpi_target_sleep_state = ACPI_STATE_S0;
208}
209
210static int acpi_suspend_state_valid(suspend_state_t pm_state) 230static int acpi_suspend_state_valid(suspend_state_t pm_state)
211{ 231{
212 u32 acpi_state; 232 u32 acpi_state;
@@ -226,30 +246,39 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
226static struct platform_suspend_ops acpi_suspend_ops = { 246static struct platform_suspend_ops acpi_suspend_ops = {
227 .valid = acpi_suspend_state_valid, 247 .valid = acpi_suspend_state_valid,
228 .begin = acpi_suspend_begin, 248 .begin = acpi_suspend_begin,
229 .prepare = acpi_suspend_prepare, 249 .prepare = acpi_pm_prepare,
230 .enter = acpi_suspend_enter, 250 .enter = acpi_suspend_enter,
231 .finish = acpi_suspend_finish, 251 .finish = acpi_pm_finish,
232 .end = acpi_suspend_end, 252 .end = acpi_pm_end,
233}; 253};
234 254
235/* 255/**
236 * Toshiba fails to preserve interrupts over S1, reinitialization 256 * acpi_suspend_begin_old - Set the target system sleep state to the
237 * of 8259 is needed after S1 resume. 257 * state associated with given @pm_state, if supported, and
258 * execute the _PTS control method. This function is used if the
259 * pre-ACPI 2.0 suspend ordering has been requested.
238 */ 260 */
239static int __init init_ints_after_s1(const struct dmi_system_id *d) 261static int acpi_suspend_begin_old(suspend_state_t pm_state)
240{ 262{
241 printk(KERN_WARNING "%s with broken S1 detected.\n", d->ident); 263 int error = acpi_suspend_begin(pm_state);
242 init_8259A_after_S1 = 1; 264
243 return 0; 265 if (!error)
266 error = __acpi_pm_prepare();
267 return error;
244} 268}
245 269
246static struct dmi_system_id __initdata acpisleep_dmi_table[] = { 270/*
247 { 271 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
248 .callback = init_ints_after_s1, 272 * been requested.
249 .ident = "Toshiba Satellite 4030cdt", 273 */
250 .matches = {DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),}, 274static struct platform_suspend_ops acpi_suspend_ops_old = {
251 }, 275 .valid = acpi_suspend_state_valid,
252 {}, 276 .begin = acpi_suspend_begin_old,
277 .prepare = acpi_pm_disable_gpes,
278 .enter = acpi_suspend_enter,
279 .finish = acpi_pm_finish,
280 .end = acpi_pm_end,
281 .recover = acpi_pm_finish,
253}; 282};
254#endif /* CONFIG_SUSPEND */ 283#endif /* CONFIG_SUSPEND */
255 284
@@ -257,22 +286,9 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
257static int acpi_hibernation_begin(void) 286static int acpi_hibernation_begin(void)
258{ 287{
259 acpi_target_sleep_state = ACPI_STATE_S4; 288 acpi_target_sleep_state = ACPI_STATE_S4;
260
261 return 0; 289 return 0;
262} 290}
263 291
264static int acpi_hibernation_prepare(void)
265{
266 int error = acpi_sleep_prepare(ACPI_STATE_S4);
267
268 if (error) {
269 acpi_target_sleep_state = ACPI_STATE_S0;
270 return error;
271 }
272
273 return ACPI_SUCCESS(acpi_hw_disable_all_gpes()) ? 0 : -EFAULT;
274}
275
276static int acpi_hibernation_enter(void) 292static int acpi_hibernation_enter(void)
277{ 293{
278 acpi_status status = AE_OK; 294 acpi_status status = AE_OK;
@@ -302,52 +318,55 @@ static void acpi_hibernation_leave(void)
302 acpi_leave_sleep_state_prep(ACPI_STATE_S4); 318 acpi_leave_sleep_state_prep(ACPI_STATE_S4);
303} 319}
304 320
305static void acpi_hibernation_finish(void) 321static void acpi_pm_enable_gpes(void)
306{ 322{
307 acpi_disable_wakeup_device(ACPI_STATE_S4); 323 acpi_hw_enable_all_runtime_gpes();
308 acpi_leave_sleep_state(ACPI_STATE_S4);
309
310 /* reset firmware waking vector */
311 acpi_set_firmware_waking_vector((acpi_physical_address) 0);
312
313 acpi_target_sleep_state = ACPI_STATE_S0;
314} 324}
315 325
316static void acpi_hibernation_end(void) 326static struct platform_hibernation_ops acpi_hibernation_ops = {
317{ 327 .begin = acpi_hibernation_begin,
318 /* 328 .end = acpi_pm_end,
319 * This is necessary in case acpi_hibernation_finish() is not called 329 .pre_snapshot = acpi_pm_prepare,
320 * during a failing transition to the sleep state. 330 .finish = acpi_pm_finish,
321 */ 331 .prepare = acpi_pm_prepare,
322 acpi_target_sleep_state = ACPI_STATE_S0; 332 .enter = acpi_hibernation_enter,
323} 333 .leave = acpi_hibernation_leave,
334 .pre_restore = acpi_pm_disable_gpes,
335 .restore_cleanup = acpi_pm_enable_gpes,
336};
324 337
325static int acpi_hibernation_pre_restore(void) 338/**
339 * acpi_hibernation_begin_old - Set the target system sleep state to
340 * ACPI_STATE_S4 and execute the _PTS control method. This
341 * function is used if the pre-ACPI 2.0 suspend ordering has been
342 * requested.
343 */
344static int acpi_hibernation_begin_old(void)
326{ 345{
327 acpi_status status; 346 int error = acpi_sleep_prepare(ACPI_STATE_S4);
328
329 status = acpi_hw_disable_all_gpes();
330
331 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
332}
333 347
334static void acpi_hibernation_restore_cleanup(void) 348 if (!error)
335{ 349 acpi_target_sleep_state = ACPI_STATE_S4;
336 acpi_hw_enable_all_runtime_gpes(); 350 return error;
337} 351}
338 352
339static struct platform_hibernation_ops acpi_hibernation_ops = { 353/*
340 .begin = acpi_hibernation_begin, 354 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
341 .end = acpi_hibernation_end, 355 * been requested.
342 .pre_snapshot = acpi_hibernation_prepare, 356 */
343 .finish = acpi_hibernation_finish, 357static struct platform_hibernation_ops acpi_hibernation_ops_old = {
344 .prepare = acpi_hibernation_prepare, 358 .begin = acpi_hibernation_begin_old,
359 .end = acpi_pm_end,
360 .pre_snapshot = acpi_pm_disable_gpes,
361 .finish = acpi_pm_finish,
362 .prepare = acpi_pm_disable_gpes,
345 .enter = acpi_hibernation_enter, 363 .enter = acpi_hibernation_enter,
346 .leave = acpi_hibernation_leave, 364 .leave = acpi_hibernation_leave,
347 .pre_restore = acpi_hibernation_pre_restore, 365 .pre_restore = acpi_pm_disable_gpes,
348 .restore_cleanup = acpi_hibernation_restore_cleanup, 366 .restore_cleanup = acpi_pm_enable_gpes,
367 .recover = acpi_pm_finish,
349}; 368};
350#endif /* CONFIG_HIBERNATION */ 369#endif /* CONFIG_HIBERNATION */
351 370
352int acpi_suspend(u32 acpi_state) 371int acpi_suspend(u32 acpi_state)
353{ 372{
@@ -368,8 +387,8 @@ int acpi_suspend(u32 acpi_state)
368/** 387/**
369 * acpi_pm_device_sleep_state - return preferred power state of ACPI device 388 * acpi_pm_device_sleep_state - return preferred power state of ACPI device
370 * in the system sleep state given by %acpi_target_sleep_state 389 * in the system sleep state given by %acpi_target_sleep_state
371 * @dev: device to examine 390 * @dev: device to examine; its driver model wakeup flags control
372 * @wake: if set, the device should be able to wake up the system 391 * whether it should be able to wake up the system
373 * @d_min_p: used to store the upper limit of allowed states range 392 * @d_min_p: used to store the upper limit of allowed states range
374 * Return value: preferred power state of the device on success, -ENODEV on 393 * Return value: preferred power state of the device on success, -ENODEV on
375 * failure (ie. if there's no 'struct acpi_device' for @dev) 394 * failure (ie. if there's no 'struct acpi_device' for @dev)
@@ -387,7 +406,7 @@ int acpi_suspend(u32 acpi_state)
387 * via @wake. 406 * via @wake.
388 */ 407 */
389 408
390int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p) 409int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
391{ 410{
392 acpi_handle handle = DEVICE_ACPI_HANDLE(dev); 411 acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
393 struct acpi_device *adev; 412 struct acpi_device *adev;
@@ -426,7 +445,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p)
426 * can wake the system. _S0W may be valid, too. 445 * can wake the system. _S0W may be valid, too.
427 */ 446 */
428 if (acpi_target_sleep_state == ACPI_STATE_S0 || 447 if (acpi_target_sleep_state == ACPI_STATE_S0 ||
429 (wake && adev->wakeup.state.enabled && 448 (device_may_wakeup(dev) && adev->wakeup.state.enabled &&
430 adev->wakeup.sleep_state <= acpi_target_sleep_state)) { 449 adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
431 acpi_status status; 450 acpi_status status;
432 451
@@ -448,6 +467,31 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p)
448 *d_min_p = d_min; 467 *d_min_p = d_min;
449 return d_max; 468 return d_max;
450} 469}
470
471/**
472 * acpi_pm_device_sleep_wake - enable or disable the system wake-up
473 * capability of given device
474 * @dev: device to handle
475 * @enable: 'true' - enable, 'false' - disable the wake-up capability
476 */
477int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
478{
479 acpi_handle handle;
480 struct acpi_device *adev;
481
482 if (!device_may_wakeup(dev))
483 return -EINVAL;
484
485 handle = DEVICE_ACPI_HANDLE(dev);
486 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
487 printk(KERN_DEBUG "ACPI handle has no context!\n");
488 return -ENODEV;
489 }
490
491 return enable ?
492 acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) :
493 acpi_disable_wakeup_device_power(adev);
494}
451#endif 495#endif
452 496
453static void acpi_power_off_prepare(void) 497static void acpi_power_off_prepare(void)
@@ -472,8 +516,6 @@ int __init acpi_sleep_init(void)
472 u8 type_a, type_b; 516 u8 type_a, type_b;
473#ifdef CONFIG_SUSPEND 517#ifdef CONFIG_SUSPEND
474 int i = 0; 518 int i = 0;
475
476 dmi_check_system(acpisleep_dmi_table);
477#endif 519#endif
478 520
479 if (acpi_disabled) 521 if (acpi_disabled)
@@ -491,13 +533,15 @@ int __init acpi_sleep_init(void)
491 } 533 }
492 } 534 }
493 535
494 suspend_set_ops(&acpi_suspend_ops); 536 suspend_set_ops(old_suspend_ordering ?
537 &acpi_suspend_ops_old : &acpi_suspend_ops);
495#endif 538#endif
496 539
497#ifdef CONFIG_HIBERNATION 540#ifdef CONFIG_HIBERNATION
498 status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b); 541 status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
499 if (ACPI_SUCCESS(status)) { 542 if (ACPI_SUCCESS(status)) {
500 hibernation_set_ops(&acpi_hibernation_ops); 543 hibernation_set_ops(old_suspend_ordering ?
544 &acpi_hibernation_ops_old : &acpi_hibernation_ops);
501 sleep_states[ACPI_STATE_S4] = 1; 545 sleep_states[ACPI_STATE_S4] = 1;
502 printk(" S4"); 546 printk(" S4");
503 } 547 }
diff --git a/drivers/acpi/sleep/wakeup.c b/drivers/acpi/sleep/wakeup.c
index ed8e41becf0c..38655eb132dc 100644
--- a/drivers/acpi/sleep/wakeup.c
+++ b/drivers/acpi/sleep/wakeup.c
@@ -42,7 +42,7 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state)
42 continue; 42 continue;
43 43
44 spin_unlock(&acpi_device_lock); 44 spin_unlock(&acpi_device_lock);
45 acpi_enable_wakeup_device_power(dev); 45 acpi_enable_wakeup_device_power(dev, sleep_state);
46 spin_lock(&acpi_device_lock); 46 spin_lock(&acpi_device_lock);
47 } 47 }
48 spin_unlock(&acpi_device_lock); 48 spin_unlock(&acpi_device_lock);
@@ -66,13 +66,15 @@ void acpi_enable_wakeup_device(u8 sleep_state)
66 list_for_each_safe(node, next, &acpi_wakeup_device_list) { 66 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
67 struct acpi_device *dev = 67 struct acpi_device *dev =
68 container_of(node, struct acpi_device, wakeup_list); 68 container_of(node, struct acpi_device, wakeup_list);
69
69 if (!dev->wakeup.flags.valid) 70 if (!dev->wakeup.flags.valid)
70 continue; 71 continue;
72
71 /* If users want to disable run-wake GPE, 73 /* If users want to disable run-wake GPE,
72 * we only disable it for wake and leave it for runtime 74 * we only disable it for wake and leave it for runtime
73 */ 75 */
74 if (!dev->wakeup.state.enabled || 76 if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
75 sleep_state > (u32) dev->wakeup.sleep_state) { 77 || sleep_state > (u32) dev->wakeup.sleep_state) {
76 if (dev->wakeup.flags.run_wake) { 78 if (dev->wakeup.flags.run_wake) {
77 spin_unlock(&acpi_device_lock); 79 spin_unlock(&acpi_device_lock);
78 /* set_gpe_type will disable GPE, leave it like that */ 80 /* set_gpe_type will disable GPE, leave it like that */
@@ -110,8 +112,9 @@ void acpi_disable_wakeup_device(u8 sleep_state)
110 112
111 if (!dev->wakeup.flags.valid) 113 if (!dev->wakeup.flags.valid)
112 continue; 114 continue;
113 if (!dev->wakeup.state.enabled || 115
114 sleep_state > (u32) dev->wakeup.sleep_state) { 116 if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
117 || sleep_state > (u32) dev->wakeup.sleep_state) {
115 if (dev->wakeup.flags.run_wake) { 118 if (dev->wakeup.flags.run_wake) {
116 spin_unlock(&acpi_device_lock); 119 spin_unlock(&acpi_device_lock);
117 acpi_set_gpe_type(dev->wakeup.gpe_device, 120 acpi_set_gpe_type(dev->wakeup.gpe_device,
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 5bd2dec9a7ac..d8e3f153b295 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -167,7 +167,13 @@ static int acpi_system_sysfs_init(void)
167#define COUNT_ERROR 2 /* other */ 167#define COUNT_ERROR 2 /* other */
168#define NUM_COUNTERS_EXTRA 3 168#define NUM_COUNTERS_EXTRA 3
169 169
170static u32 *all_counters; 170#define ACPI_EVENT_VALID 0x01
171struct event_counter {
172 u32 count;
173 u32 flags;
174};
175
176static struct event_counter *all_counters;
171static u32 num_gpes; 177static u32 num_gpes;
172static u32 num_counters; 178static u32 num_counters;
173static struct attribute **all_attrs; 179static struct attribute **all_attrs;
@@ -202,9 +208,44 @@ static int count_num_gpes(void)
202 return count; 208 return count;
203} 209}
204 210
211static int get_gpe_device(int index, acpi_handle *handle)
212{
213 struct acpi_gpe_xrupt_info *gpe_xrupt_info;
214 struct acpi_gpe_block_info *gpe_block;
215 acpi_cpu_flags flags;
216 struct acpi_namespace_node *node;
217
218 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
219
220 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
221 while (gpe_xrupt_info) {
222 gpe_block = gpe_xrupt_info->gpe_block_list_head;
223 node = gpe_block->node;
224 while (gpe_block) {
225 index -= gpe_block->register_count *
226 ACPI_GPE_REGISTER_WIDTH;
227 if (index < 0) {
228 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
229 /* return NULL if it's FADT GPE */
230 if (node->type != ACPI_TYPE_DEVICE)
231 *handle = NULL;
232 else
233 *handle = node;
234 return 0;
235 }
236 node = gpe_block->node;
237 gpe_block = gpe_block->next;
238 }
239 gpe_xrupt_info = gpe_xrupt_info->next;
240 }
241 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
242
243 return -ENODEV;
244}
245
205static void delete_gpe_attr_array(void) 246static void delete_gpe_attr_array(void)
206{ 247{
207 u32 *tmp = all_counters; 248 struct event_counter *tmp = all_counters;
208 249
209 all_counters = NULL; 250 all_counters = NULL;
210 kfree(tmp); 251 kfree(tmp);
@@ -230,9 +271,10 @@ void acpi_os_gpe_count(u32 gpe_number)
230 return; 271 return;
231 272
232 if (gpe_number < num_gpes) 273 if (gpe_number < num_gpes)
233 all_counters[gpe_number]++; 274 all_counters[gpe_number].count++;
234 else 275 else
235 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]++; 276 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR].
277 count++;
236 278
237 return; 279 return;
238} 280}
@@ -243,44 +285,144 @@ void acpi_os_fixed_event_count(u32 event_number)
243 return; 285 return;
244 286
245 if (event_number < ACPI_NUM_FIXED_EVENTS) 287 if (event_number < ACPI_NUM_FIXED_EVENTS)
246 all_counters[num_gpes + event_number]++; 288 all_counters[num_gpes + event_number].count++;
247 else 289 else
248 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]++; 290 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR].
291 count++;
249 292
250 return; 293 return;
251} 294}
252 295
296static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle)
297{
298 int result = 0;
299
300 if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
301 goto end;
302
303 if (index < num_gpes) {
304 result = get_gpe_device(index, handle);
305 if (result) {
306 ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
307 "Invalid GPE 0x%x\n", index));
308 goto end;
309 }
310 result = acpi_get_gpe_status(*handle, index,
311 ACPI_NOT_ISR, status);
312 } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
313 result = acpi_get_event_status(index - num_gpes, status);
314
315 /*
316 * sleep/power button GPE/Fixed Event is enabled after acpi_system_init,
317 * check the status at runtime and mark it as valid once it's enabled
318 */
319 if (!result && (*status & ACPI_EVENT_FLAG_ENABLED))
320 all_counters[index].flags |= ACPI_EVENT_VALID;
321end:
322 return result;
323}
324
253static ssize_t counter_show(struct kobject *kobj, 325static ssize_t counter_show(struct kobject *kobj,
254 struct kobj_attribute *attr, char *buf) 326 struct kobj_attribute *attr, char *buf)
255{ 327{
256 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI] = 328 int index = attr - counter_attrs;
329 int size;
330 acpi_handle handle;
331 acpi_event_status status;
332 int result = 0;
333
334 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
257 acpi_irq_handled; 335 acpi_irq_handled;
258 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE] = 336 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
259 acpi_gpe_count; 337 acpi_gpe_count;
260 338
261 return sprintf(buf, "%d\n", all_counters[attr - counter_attrs]); 339 size = sprintf(buf, "%8d", all_counters[index].count);
340
341 /* "gpe_all" or "sci" */
342 if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
343 goto end;
344
345 result = get_status(index, &status, &handle);
346 if (result)
347 goto end;
348
349 if (!(all_counters[index].flags & ACPI_EVENT_VALID))
350 size += sprintf(buf + size, " invalid");
351 else if (status & ACPI_EVENT_FLAG_ENABLED)
352 size += sprintf(buf + size, " enable");
353 else
354 size += sprintf(buf + size, " disable");
355
356end:
357 size += sprintf(buf + size, "\n");
358 return result ? result : size;
262} 359}
263 360
264/* 361/*
265 * counter_set() sets the specified counter. 362 * counter_set() sets the specified counter.
266 * setting the total "sci" file to any value clears all counters. 363 * setting the total "sci" file to any value clears all counters.
364 * enable/disable/clear a gpe/fixed event in user space.
267 */ 365 */
268static ssize_t counter_set(struct kobject *kobj, 366static ssize_t counter_set(struct kobject *kobj,
269 struct kobj_attribute *attr, const char *buf, size_t size) 367 struct kobj_attribute *attr, const char *buf, size_t size)
270{ 368{
271 int index = attr - counter_attrs; 369 int index = attr - counter_attrs;
370 acpi_event_status status;
371 acpi_handle handle;
372 int result = 0;
272 373
273 if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) { 374 if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
274 int i; 375 int i;
275 for (i = 0; i < num_counters; ++i) 376 for (i = 0; i < num_counters; ++i)
276 all_counters[i] = 0; 377 all_counters[i].count = 0;
277 acpi_gpe_count = 0; 378 acpi_gpe_count = 0;
278 acpi_irq_handled = 0; 379 acpi_irq_handled = 0;
380 goto end;
381 }
279 382
383 /* show the event status for both GPEs and Fixed Events */
384 result = get_status(index, &status, &handle);
385 if (result)
386 goto end;
387
388 if (!(all_counters[index].flags & ACPI_EVENT_VALID)) {
389 ACPI_DEBUG_PRINT((ACPI_DB_WARN,
390 "Can not change Invalid GPE/Fixed Event status\n"));
391 return -EINVAL;
392 }
393
394 if (index < num_gpes) {
395 if (!strcmp(buf, "disable\n") &&
396 (status & ACPI_EVENT_FLAG_ENABLED))
397 result = acpi_disable_gpe(handle, index, ACPI_NOT_ISR);
398 else if (!strcmp(buf, "enable\n") &&
399 !(status & ACPI_EVENT_FLAG_ENABLED))
400 result = acpi_enable_gpe(handle, index, ACPI_NOT_ISR);
401 else if (!strcmp(buf, "clear\n") &&
402 (status & ACPI_EVENT_FLAG_SET))
403 result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR);
404 else
405 all_counters[index].count = strtoul(buf, NULL, 0);
406 } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
407 int event = index - num_gpes;
408 if (!strcmp(buf, "disable\n") &&
409 (status & ACPI_EVENT_FLAG_ENABLED))
410 result = acpi_disable_event(event, ACPI_NOT_ISR);
411 else if (!strcmp(buf, "enable\n") &&
412 !(status & ACPI_EVENT_FLAG_ENABLED))
413 result = acpi_enable_event(event, ACPI_NOT_ISR);
414 else if (!strcmp(buf, "clear\n") &&
415 (status & ACPI_EVENT_FLAG_SET))
416 result = acpi_clear_event(event);
417 else
418 all_counters[index].count = strtoul(buf, NULL, 0);
280 } else 419 } else
281 all_counters[index] = strtoul(buf, NULL, 0); 420 all_counters[index].count = strtoul(buf, NULL, 0);
282 421
283 return size; 422 if (ACPI_FAILURE(result))
423 result = -EINVAL;
424end:
425 return result ? result : size;
284} 426}
285 427
286void acpi_irq_stats_init(void) 428void acpi_irq_stats_init(void)
@@ -298,7 +440,8 @@ void acpi_irq_stats_init(void)
298 if (all_attrs == NULL) 440 if (all_attrs == NULL)
299 return; 441 return;
300 442
301 all_counters = kzalloc(sizeof(u32) * (num_counters), GFP_KERNEL); 443 all_counters = kzalloc(sizeof(struct event_counter) * (num_counters),
444 GFP_KERNEL);
302 if (all_counters == NULL) 445 if (all_counters == NULL)
303 goto fail; 446 goto fail;
304 447
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
index 949d4114eb9f..ccb5b64bbef3 100644
--- a/drivers/acpi/tables/tbfadt.c
+++ b/drivers/acpi/tables/tbfadt.c
@@ -124,7 +124,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
124 124
125static void inline 125static void inline
126acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, 126acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
127 u8 bit_width, u64 address) 127 u8 byte_width, u64 address)
128{ 128{
129 129
130 /* 130 /*
@@ -136,7 +136,7 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
136 /* All other fields are byte-wide */ 136 /* All other fields are byte-wide */
137 137
138 generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO; 138 generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO;
139 generic_address->bit_width = bit_width; 139 generic_address->bit_width = byte_width << 3;
140 generic_address->bit_offset = 0; 140 generic_address->bit_offset = 0;
141 generic_address->access_width = 0; 141 generic_address->access_width = 0;
142} 142}
@@ -155,7 +155,7 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
155 * 155 *
156 ******************************************************************************/ 156 ******************************************************************************/
157 157
158void acpi_tb_parse_fadt(acpi_native_uint table_index, u8 flags) 158void acpi_tb_parse_fadt(u32 table_index, u8 flags)
159{ 159{
160 u32 length; 160 u32 length;
161 struct acpi_table_header *table; 161 struct acpi_table_header *table;
@@ -280,7 +280,7 @@ static void acpi_tb_convert_fadt(void)
280{ 280{
281 u8 pm1_register_length; 281 u8 pm1_register_length;
282 struct acpi_generic_address *target; 282 struct acpi_generic_address *target;
283 acpi_native_uint i; 283 u32 i;
284 284
285 /* Update the local FADT table header length */ 285 /* Update the local FADT table header length */
286 286
@@ -343,9 +343,11 @@ static void acpi_tb_convert_fadt(void)
343 * 343 *
344 * The PM event blocks are split into two register blocks, first is the 344 * The PM event blocks are split into two register blocks, first is the
345 * PM Status Register block, followed immediately by the PM Enable Register 345 * PM Status Register block, followed immediately by the PM Enable Register
346 * block. Each is of length (pm1_event_length/2) 346 * block. Each is of length (xpm1x_event_block.bit_width/2)
347 */ 347 */
348 pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length); 348 WARN_ON(ACPI_MOD_16(acpi_gbl_FADT.xpm1a_event_block.bit_width));
349 pm1_register_length = (u8) ACPI_DIV_16(acpi_gbl_FADT
350 .xpm1a_event_block.bit_width);
349 351
350 /* The PM1A register block is required */ 352 /* The PM1A register block is required */
351 353
@@ -360,14 +362,17 @@ static void acpi_tb_convert_fadt(void)
360 /* The PM1B register block is optional, ignore if not present */ 362 /* The PM1B register block is optional, ignore if not present */
361 363
362 if (acpi_gbl_FADT.xpm1b_event_block.address) { 364 if (acpi_gbl_FADT.xpm1b_event_block.address) {
365 WARN_ON(ACPI_MOD_16(acpi_gbl_FADT.xpm1b_event_block.bit_width));
366 pm1_register_length = (u8) ACPI_DIV_16(acpi_gbl_FADT
367 .xpm1b_event_block
368 .bit_width);
363 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable, 369 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
364 pm1_register_length, 370 pm1_register_length,
365 (acpi_gbl_FADT.xpm1b_event_block. 371 (acpi_gbl_FADT.xpm1b_event_block.
366 address + pm1_register_length)); 372 address + pm1_register_length));
367 /* Don't forget to copy space_id of the GAS */ 373 /* Don't forget to copy space_id of the GAS */
368 acpi_gbl_xpm1b_enable.space_id = 374 acpi_gbl_xpm1b_enable.space_id =
369 acpi_gbl_FADT.xpm1a_event_block.space_id; 375 acpi_gbl_FADT.xpm1b_event_block.space_id;
370
371 } 376 }
372} 377}
373 378
@@ -396,7 +401,7 @@ static void acpi_tb_validate_fadt(void)
396 u32 *address32; 401 u32 *address32;
397 struct acpi_generic_address *address64; 402 struct acpi_generic_address *address64;
398 u8 length; 403 u8 length;
399 acpi_native_uint i; 404 u32 i;
400 405
401 /* Examine all of the 64-bit extended address fields (X fields) */ 406 /* Examine all of the 64-bit extended address fields (X fields) */
402 407
diff --git a/drivers/acpi/tables/tbfind.c b/drivers/acpi/tables/tbfind.c
index 9ca3afc98c80..531584defbb8 100644
--- a/drivers/acpi/tables/tbfind.c
+++ b/drivers/acpi/tables/tbfind.c
@@ -65,10 +65,9 @@ ACPI_MODULE_NAME("tbfind")
65 ******************************************************************************/ 65 ******************************************************************************/
66acpi_status 66acpi_status
67acpi_tb_find_table(char *signature, 67acpi_tb_find_table(char *signature,
68 char *oem_id, 68 char *oem_id, char *oem_table_id, u32 *table_index)
69 char *oem_table_id, acpi_native_uint * table_index)
70{ 69{
71 acpi_native_uint i; 70 u32 i;
72 acpi_status status; 71 acpi_status status;
73 struct acpi_table_header header; 72 struct acpi_table_header header;
74 73
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c
index 5336ce88f89f..b22185f55a16 100644
--- a/drivers/acpi/tables/tbinstal.c
+++ b/drivers/acpi/tables/tbinstal.c
@@ -107,11 +107,10 @@ acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc)
107 ******************************************************************************/ 107 ******************************************************************************/
108 108
109acpi_status 109acpi_status
110acpi_tb_add_table(struct acpi_table_desc *table_desc, 110acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
111 acpi_native_uint * table_index)
112{ 111{
113 acpi_native_uint i; 112 u32 i;
114 acpi_native_uint length; 113 u32 length;
115 acpi_status status = AE_OK; 114 acpi_status status = AE_OK;
116 115
117 ACPI_FUNCTION_TRACE(tb_add_table); 116 ACPI_FUNCTION_TRACE(tb_add_table);
@@ -207,8 +206,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
207 206
208 /* Increase the Table Array size */ 207 /* Increase the Table Array size */
209 208
210 tables = ACPI_ALLOCATE_ZEROED((acpi_gbl_root_table_list.size + 209 tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list.
211 ACPI_ROOT_TABLE_SIZE_INCREMENT) 210 size + ACPI_ROOT_TABLE_SIZE_INCREMENT)
212 * sizeof(struct acpi_table_desc)); 211 * sizeof(struct acpi_table_desc));
213 if (!tables) { 212 if (!tables) {
214 ACPI_ERROR((AE_INFO, 213 ACPI_ERROR((AE_INFO,
@@ -220,7 +219,7 @@ acpi_status acpi_tb_resize_root_table_list(void)
220 219
221 if (acpi_gbl_root_table_list.tables) { 220 if (acpi_gbl_root_table_list.tables) {
222 ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, 221 ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
223 acpi_gbl_root_table_list.size * 222 (acpi_size) acpi_gbl_root_table_list.size *
224 sizeof(struct acpi_table_desc)); 223 sizeof(struct acpi_table_desc));
225 224
226 if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { 225 if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
@@ -253,7 +252,7 @@ acpi_status acpi_tb_resize_root_table_list(void)
253acpi_status 252acpi_status
254acpi_tb_store_table(acpi_physical_address address, 253acpi_tb_store_table(acpi_physical_address address,
255 struct acpi_table_header *table, 254 struct acpi_table_header *table,
256 u32 length, u8 flags, acpi_native_uint * table_index) 255 u32 length, u8 flags, u32 *table_index)
257{ 256{
258 acpi_status status = AE_OK; 257 acpi_status status = AE_OK;
259 258
@@ -334,7 +333,7 @@ void acpi_tb_delete_table(struct acpi_table_desc *table_desc)
334 333
335void acpi_tb_terminate(void) 334void acpi_tb_terminate(void)
336{ 335{
337 acpi_native_uint i; 336 u32 i;
338 337
339 ACPI_FUNCTION_TRACE(tb_terminate); 338 ACPI_FUNCTION_TRACE(tb_terminate);
340 339
@@ -374,7 +373,7 @@ void acpi_tb_terminate(void)
374 * 373 *
375 ******************************************************************************/ 374 ******************************************************************************/
376 375
377void acpi_tb_delete_namespace_by_owner(acpi_native_uint table_index) 376void acpi_tb_delete_namespace_by_owner(u32 table_index)
378{ 377{
379 acpi_owner_id owner_id; 378 acpi_owner_id owner_id;
380 379
@@ -403,7 +402,7 @@ void acpi_tb_delete_namespace_by_owner(acpi_native_uint table_index)
403 * 402 *
404 ******************************************************************************/ 403 ******************************************************************************/
405 404
406acpi_status acpi_tb_allocate_owner_id(acpi_native_uint table_index) 405acpi_status acpi_tb_allocate_owner_id(u32 table_index)
407{ 406{
408 acpi_status status = AE_BAD_PARAMETER; 407 acpi_status status = AE_BAD_PARAMETER;
409 408
@@ -431,7 +430,7 @@ acpi_status acpi_tb_allocate_owner_id(acpi_native_uint table_index)
431 * 430 *
432 ******************************************************************************/ 431 ******************************************************************************/
433 432
434acpi_status acpi_tb_release_owner_id(acpi_native_uint table_index) 433acpi_status acpi_tb_release_owner_id(u32 table_index)
435{ 434{
436 acpi_status status = AE_BAD_PARAMETER; 435 acpi_status status = AE_BAD_PARAMETER;
437 436
@@ -462,8 +461,7 @@ acpi_status acpi_tb_release_owner_id(acpi_native_uint table_index)
462 * 461 *
463 ******************************************************************************/ 462 ******************************************************************************/
464 463
465acpi_status 464acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id)
466acpi_tb_get_owner_id(acpi_native_uint table_index, acpi_owner_id * owner_id)
467{ 465{
468 acpi_status status = AE_BAD_PARAMETER; 466 acpi_status status = AE_BAD_PARAMETER;
469 467
@@ -490,7 +488,7 @@ acpi_tb_get_owner_id(acpi_native_uint table_index, acpi_owner_id * owner_id)
490 * 488 *
491 ******************************************************************************/ 489 ******************************************************************************/
492 490
493u8 acpi_tb_is_table_loaded(acpi_native_uint table_index) 491u8 acpi_tb_is_table_loaded(u32 table_index)
494{ 492{
495 u8 is_loaded = FALSE; 493 u8 is_loaded = FALSE;
496 494
@@ -518,7 +516,7 @@ u8 acpi_tb_is_table_loaded(acpi_native_uint table_index)
518 * 516 *
519 ******************************************************************************/ 517 ******************************************************************************/
520 518
521void acpi_tb_set_table_loaded_flag(acpi_native_uint table_index, u8 is_loaded) 519void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded)
522{ 520{
523 521
524 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 522 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c
index bc019b9b6a68..0cc92ef5236f 100644
--- a/drivers/acpi/tables/tbutils.c
+++ b/drivers/acpi/tables/tbutils.c
@@ -49,8 +49,8 @@ ACPI_MODULE_NAME("tbutils")
49 49
50/* Local prototypes */ 50/* Local prototypes */
51static acpi_physical_address 51static acpi_physical_address
52acpi_tb_get_root_table_entry(u8 * table_entry, 52acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
53 acpi_native_uint table_entry_size); 53
54/******************************************************************************* 54/*******************************************************************************
55 * 55 *
56 * FUNCTION: acpi_tb_check_xsdt 56 * FUNCTION: acpi_tb_check_xsdt
@@ -238,7 +238,7 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
238 * 238 *
239 ******************************************************************************/ 239 ******************************************************************************/
240 240
241u8 acpi_tb_checksum(u8 * buffer, acpi_native_uint length) 241u8 acpi_tb_checksum(u8 *buffer, u32 length)
242{ 242{
243 u8 sum = 0; 243 u8 sum = 0;
244 u8 *end = buffer + length; 244 u8 *end = buffer + length;
@@ -268,7 +268,7 @@ u8 acpi_tb_checksum(u8 * buffer, acpi_native_uint length)
268 268
269void 269void
270acpi_tb_install_table(acpi_physical_address address, 270acpi_tb_install_table(acpi_physical_address address,
271 u8 flags, char *signature, acpi_native_uint table_index) 271 u8 flags, char *signature, u32 table_index)
272{ 272{
273 struct acpi_table_header *table; 273 struct acpi_table_header *table;
274 274
@@ -336,8 +336,7 @@ acpi_tb_install_table(acpi_physical_address address,
336 ******************************************************************************/ 336 ******************************************************************************/
337 337
338static acpi_physical_address 338static acpi_physical_address
339acpi_tb_get_root_table_entry(u8 * table_entry, 339acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
340 acpi_native_uint table_entry_size)
341{ 340{
342 u64 address64; 341 u64 address64;
343 342
@@ -395,8 +394,8 @@ acpi_status __init
395acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags) 394acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
396{ 395{
397 struct acpi_table_rsdp *rsdp; 396 struct acpi_table_rsdp *rsdp;
398 acpi_native_uint table_entry_size; 397 u32 table_entry_size;
399 acpi_native_uint i; 398 u32 i;
400 u32 table_count; 399 u32 table_count;
401 struct acpi_table_header *table; 400 struct acpi_table_header *table;
402 acpi_physical_address address; 401 acpi_physical_address address;
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
index 0e319604d3e7..fd7770aa1061 100644
--- a/drivers/acpi/tables/tbxface.c
+++ b/drivers/acpi/tables/tbxface.c
@@ -125,7 +125,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
125 /* Root Table Array has been statically allocated by the host */ 125 /* Root Table Array has been statically allocated by the host */
126 126
127 ACPI_MEMSET(initial_table_array, 0, 127 ACPI_MEMSET(initial_table_array, 0,
128 initial_table_count * 128 (acpi_size) initial_table_count *
129 sizeof(struct acpi_table_desc)); 129 sizeof(struct acpi_table_desc));
130 130
131 acpi_gbl_root_table_list.tables = initial_table_array; 131 acpi_gbl_root_table_list.tables = initial_table_array;
@@ -183,9 +183,9 @@ acpi_status acpi_reallocate_root_table(void)
183 return_ACPI_STATUS(AE_SUPPORT); 183 return_ACPI_STATUS(AE_SUPPORT);
184 } 184 }
185 185
186 new_size = 186 new_size = ((acpi_size) acpi_gbl_root_table_list.count +
187 (acpi_gbl_root_table_list.count + 187 ACPI_ROOT_TABLE_SIZE_INCREMENT) *
188 ACPI_ROOT_TABLE_SIZE_INCREMENT) * sizeof(struct acpi_table_desc); 188 sizeof(struct acpi_table_desc);
189 189
190 /* Create new array and copy the old array */ 190 /* Create new array and copy the old array */
191 191
@@ -222,7 +222,7 @@ acpi_status acpi_reallocate_root_table(void)
222acpi_status acpi_load_table(struct acpi_table_header *table_ptr) 222acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
223{ 223{
224 acpi_status status; 224 acpi_status status;
225 acpi_native_uint table_index; 225 u32 table_index;
226 struct acpi_table_desc table_desc; 226 struct acpi_table_desc table_desc;
227 227
228 if (!table_ptr) 228 if (!table_ptr)
@@ -264,11 +264,10 @@ ACPI_EXPORT_SYMBOL(acpi_load_table)
264 *****************************************************************************/ 264 *****************************************************************************/
265acpi_status 265acpi_status
266acpi_get_table_header(char *signature, 266acpi_get_table_header(char *signature,
267 acpi_native_uint instance, 267 u32 instance, struct acpi_table_header *out_table_header)
268 struct acpi_table_header * out_table_header)
269{ 268{
270 acpi_native_uint i; 269 u32 i;
271 acpi_native_uint j; 270 u32 j;
272 struct acpi_table_header *header; 271 struct acpi_table_header *header;
273 272
274 /* Parameter validation */ 273 /* Parameter validation */
@@ -378,10 +377,10 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
378 *****************************************************************************/ 377 *****************************************************************************/
379acpi_status 378acpi_status
380acpi_get_table(char *signature, 379acpi_get_table(char *signature,
381 acpi_native_uint instance, struct acpi_table_header **out_table) 380 u32 instance, struct acpi_table_header **out_table)
382{ 381{
383 acpi_native_uint i; 382 u32 i;
384 acpi_native_uint j; 383 u32 j;
385 acpi_status status; 384 acpi_status status;
386 385
387 /* Parameter validation */ 386 /* Parameter validation */
@@ -435,8 +434,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table)
435 * 434 *
436 ******************************************************************************/ 435 ******************************************************************************/
437acpi_status 436acpi_status
438acpi_get_table_by_index(acpi_native_uint table_index, 437acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
439 struct acpi_table_header ** table)
440{ 438{
441 acpi_status status; 439 acpi_status status;
442 440
@@ -493,7 +491,7 @@ static acpi_status acpi_tb_load_namespace(void)
493{ 491{
494 acpi_status status; 492 acpi_status status;
495 struct acpi_table_header *table; 493 struct acpi_table_header *table;
496 acpi_native_uint i; 494 u32 i;
497 495
498 ACPI_FUNCTION_TRACE(tb_load_namespace); 496 ACPI_FUNCTION_TRACE(tb_load_namespace);
499 497
diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/tables/tbxfroot.c
index b8c0dfa084f6..2d157e0f98d2 100644
--- a/drivers/acpi/tables/tbxfroot.c
+++ b/drivers/acpi/tables/tbxfroot.c
@@ -118,7 +118,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
118 * 118 *
119 ******************************************************************************/ 119 ******************************************************************************/
120 120
121acpi_status acpi_find_root_pointer(acpi_native_uint * table_address) 121acpi_status acpi_find_root_pointer(acpi_size *table_address)
122{ 122{
123 u8 *table_ptr; 123 u8 *table_ptr;
124 u8 *mem_rover; 124 u8 *mem_rover;
@@ -153,7 +153,7 @@ acpi_status acpi_find_root_pointer(acpi_native_uint * table_address)
153 * 1b) Search EBDA paragraphs (EBDA is required to be a 153 * 1b) Search EBDA paragraphs (EBDA is required to be a
154 * minimum of 1_k length) 154 * minimum of 1_k length)
155 */ 155 */
156 table_ptr = acpi_os_map_memory((acpi_native_uint) 156 table_ptr = acpi_os_map_memory((acpi_physical_address)
157 physical_address, 157 physical_address,
158 ACPI_EBDA_WINDOW_SIZE); 158 ACPI_EBDA_WINDOW_SIZE);
159 if (!table_ptr) { 159 if (!table_ptr) {
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index ede084829a70..3dfb8a442b26 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -309,7 +309,8 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
309 * 309 *
310 ******************************************************************************/ 310 ******************************************************************************/
311 311
312void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line) 312void *acpi_ut_allocate(acpi_size size,
313 u32 component, const char *module, u32 line)
313{ 314{
314 void *allocation; 315 void *allocation;
315 316
@@ -353,7 +354,7 @@ void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line)
353 ******************************************************************************/ 354 ******************************************************************************/
354 355
355void *acpi_ut_allocate_zeroed(acpi_size size, 356void *acpi_ut_allocate_zeroed(acpi_size size,
356 u32 component, char *module, u32 line) 357 u32 component, const char *module, u32 line)
357{ 358{
358 void *allocation; 359 void *allocation;
359 360
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c
index 655c290aca7b..53499ac90988 100644
--- a/drivers/acpi/utilities/utcopy.c
+++ b/drivers/acpi/utilities/utcopy.c
@@ -572,7 +572,7 @@ acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
572 acpi_status status = AE_OK; 572 acpi_status status = AE_OK;
573 union acpi_operand_object *package_object; 573 union acpi_operand_object *package_object;
574 union acpi_operand_object **package_elements; 574 union acpi_operand_object **package_elements;
575 acpi_native_uint i; 575 u32 i;
576 576
577 ACPI_FUNCTION_TRACE(ut_copy_epackage_to_ipackage); 577 ACPI_FUNCTION_TRACE(ut_copy_epackage_to_ipackage);
578 578
@@ -599,7 +599,7 @@ acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
599 599
600 /* Truncate package and delete it */ 600 /* Truncate package and delete it */
601 601
602 package_object->package.count = (u32) i; 602 package_object->package.count = i;
603 package_elements[i] = NULL; 603 package_elements[i] = NULL;
604 acpi_ut_remove_reference(package_object); 604 acpi_ut_remove_reference(package_object);
605 return_ACPI_STATUS(status); 605 return_ACPI_STATUS(status);
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c
index f938f465efa4..fd66ecb6741e 100644
--- a/drivers/acpi/utilities/utdebug.c
+++ b/drivers/acpi/utilities/utdebug.c
@@ -157,7 +157,8 @@ void ACPI_INTERNAL_VAR_XFACE
157acpi_ut_debug_print(u32 requested_debug_level, 157acpi_ut_debug_print(u32 requested_debug_level,
158 u32 line_number, 158 u32 line_number,
159 const char *function_name, 159 const char *function_name,
160 char *module_name, u32 component_id, char *format, ...) 160 const char *module_name,
161 u32 component_id, const char *format, ...)
161{ 162{
162 acpi_thread_id thread_id; 163 acpi_thread_id thread_id;
163 va_list args; 164 va_list args;
@@ -228,7 +229,8 @@ void ACPI_INTERNAL_VAR_XFACE
228acpi_ut_debug_print_raw(u32 requested_debug_level, 229acpi_ut_debug_print_raw(u32 requested_debug_level,
229 u32 line_number, 230 u32 line_number,
230 const char *function_name, 231 const char *function_name,
231 char *module_name, u32 component_id, char *format, ...) 232 const char *module_name,
233 u32 component_id, const char *format, ...)
232{ 234{
233 va_list args; 235 va_list args;
234 236
@@ -261,7 +263,8 @@ ACPI_EXPORT_SYMBOL(acpi_ut_debug_print_raw)
261 ******************************************************************************/ 263 ******************************************************************************/
262void 264void
263acpi_ut_trace(u32 line_number, 265acpi_ut_trace(u32 line_number,
264 const char *function_name, char *module_name, u32 component_id) 266 const char *function_name,
267 const char *module_name, u32 component_id)
265{ 268{
266 269
267 acpi_gbl_nesting_level++; 270 acpi_gbl_nesting_level++;
@@ -293,7 +296,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_trace)
293void 296void
294acpi_ut_trace_ptr(u32 line_number, 297acpi_ut_trace_ptr(u32 line_number,
295 const char *function_name, 298 const char *function_name,
296 char *module_name, u32 component_id, void *pointer) 299 const char *module_name, u32 component_id, void *pointer)
297{ 300{
298 acpi_gbl_nesting_level++; 301 acpi_gbl_nesting_level++;
299 acpi_ut_track_stack_ptr(); 302 acpi_ut_track_stack_ptr();
@@ -324,7 +327,7 @@ acpi_ut_trace_ptr(u32 line_number,
324void 327void
325acpi_ut_trace_str(u32 line_number, 328acpi_ut_trace_str(u32 line_number,
326 const char *function_name, 329 const char *function_name,
327 char *module_name, u32 component_id, char *string) 330 const char *module_name, u32 component_id, char *string)
328{ 331{
329 332
330 acpi_gbl_nesting_level++; 333 acpi_gbl_nesting_level++;
@@ -356,7 +359,7 @@ acpi_ut_trace_str(u32 line_number,
356void 359void
357acpi_ut_trace_u32(u32 line_number, 360acpi_ut_trace_u32(u32 line_number,
358 const char *function_name, 361 const char *function_name,
359 char *module_name, u32 component_id, u32 integer) 362 const char *module_name, u32 component_id, u32 integer)
360{ 363{
361 364
362 acpi_gbl_nesting_level++; 365 acpi_gbl_nesting_level++;
@@ -386,7 +389,8 @@ acpi_ut_trace_u32(u32 line_number,
386 389
387void 390void
388acpi_ut_exit(u32 line_number, 391acpi_ut_exit(u32 line_number,
389 const char *function_name, char *module_name, u32 component_id) 392 const char *function_name,
393 const char *module_name, u32 component_id)
390{ 394{
391 395
392 acpi_ut_debug_print(ACPI_LV_FUNCTIONS, 396 acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
@@ -417,7 +421,8 @@ ACPI_EXPORT_SYMBOL(acpi_ut_exit)
417void 421void
418acpi_ut_status_exit(u32 line_number, 422acpi_ut_status_exit(u32 line_number,
419 const char *function_name, 423 const char *function_name,
420 char *module_name, u32 component_id, acpi_status status) 424 const char *module_name,
425 u32 component_id, acpi_status status)
421{ 426{
422 427
423 if (ACPI_SUCCESS(status)) { 428 if (ACPI_SUCCESS(status)) {
@@ -458,7 +463,8 @@ ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
458void 463void
459acpi_ut_value_exit(u32 line_number, 464acpi_ut_value_exit(u32 line_number,
460 const char *function_name, 465 const char *function_name,
461 char *module_name, u32 component_id, acpi_integer value) 466 const char *module_name,
467 u32 component_id, acpi_integer value)
462{ 468{
463 469
464 acpi_ut_debug_print(ACPI_LV_FUNCTIONS, 470 acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
@@ -490,7 +496,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
490void 496void
491acpi_ut_ptr_exit(u32 line_number, 497acpi_ut_ptr_exit(u32 line_number,
492 const char *function_name, 498 const char *function_name,
493 char *module_name, u32 component_id, u8 * ptr) 499 const char *module_name, u32 component_id, u8 *ptr)
494{ 500{
495 501
496 acpi_ut_debug_print(ACPI_LV_FUNCTIONS, 502 acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
@@ -519,8 +525,8 @@ acpi_ut_ptr_exit(u32 line_number,
519 525
520void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) 526void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
521{ 527{
522 acpi_native_uint i = 0; 528 u32 i = 0;
523 acpi_native_uint j; 529 u32 j;
524 u32 temp32; 530 u32 temp32;
525 u8 buf_char; 531 u8 buf_char;
526 532
@@ -539,7 +545,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
539 545
540 /* Print current offset */ 546 /* Print current offset */
541 547
542 acpi_os_printf("%6.4X: ", (u32) i); 548 acpi_os_printf("%6.4X: ", i);
543 549
544 /* Print 16 hex chars */ 550 /* Print 16 hex chars */
545 551
@@ -549,7 +555,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
549 /* Dump fill spaces */ 555 /* Dump fill spaces */
550 556
551 acpi_os_printf("%*s", ((display * 2) + 1), " "); 557 acpi_os_printf("%*s", ((display * 2) + 1), " ");
552 j += (acpi_native_uint) display; 558 j += display;
553 continue; 559 continue;
554 } 560 }
555 561
@@ -557,32 +563,38 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
557 case DB_BYTE_DISPLAY: 563 case DB_BYTE_DISPLAY:
558 default: /* Default is BYTE display */ 564 default: /* Default is BYTE display */
559 565
560 acpi_os_printf("%02X ", buffer[i + j]); 566 acpi_os_printf("%02X ",
567 buffer[(acpi_size) i + j]);
561 break; 568 break;
562 569
563 case DB_WORD_DISPLAY: 570 case DB_WORD_DISPLAY:
564 571
565 ACPI_MOVE_16_TO_32(&temp32, &buffer[i + j]); 572 ACPI_MOVE_16_TO_32(&temp32,
573 &buffer[(acpi_size) i + j]);
566 acpi_os_printf("%04X ", temp32); 574 acpi_os_printf("%04X ", temp32);
567 break; 575 break;
568 576
569 case DB_DWORD_DISPLAY: 577 case DB_DWORD_DISPLAY:
570 578
571 ACPI_MOVE_32_TO_32(&temp32, &buffer[i + j]); 579 ACPI_MOVE_32_TO_32(&temp32,
580 &buffer[(acpi_size) i + j]);
572 acpi_os_printf("%08X ", temp32); 581 acpi_os_printf("%08X ", temp32);
573 break; 582 break;
574 583
575 case DB_QWORD_DISPLAY: 584 case DB_QWORD_DISPLAY:
576 585
577 ACPI_MOVE_32_TO_32(&temp32, &buffer[i + j]); 586 ACPI_MOVE_32_TO_32(&temp32,
587 &buffer[(acpi_size) i + j]);
578 acpi_os_printf("%08X", temp32); 588 acpi_os_printf("%08X", temp32);
579 589
580 ACPI_MOVE_32_TO_32(&temp32, &buffer[i + j + 4]); 590 ACPI_MOVE_32_TO_32(&temp32,
591 &buffer[(acpi_size) i + j +
592 4]);
581 acpi_os_printf("%08X ", temp32); 593 acpi_os_printf("%08X ", temp32);
582 break; 594 break;
583 } 595 }
584 596
585 j += (acpi_native_uint) display; 597 j += display;
586 } 598 }
587 599
588 /* 600 /*
@@ -596,7 +608,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
596 return; 608 return;
597 } 609 }
598 610
599 buf_char = buffer[i + j]; 611 buf_char = buffer[(acpi_size) i + j];
600 if (ACPI_IS_PRINT(buf_char)) { 612 if (ACPI_IS_PRINT(buf_char)) {
601 acpi_os_printf("%c", buf_char); 613 acpi_os_printf("%c", buf_char);
602 } else { 614 } else {
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index 1fbc35139e84..c5c791a575c9 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -442,7 +442,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
442 union acpi_generic_state *state_list = NULL; 442 union acpi_generic_state *state_list = NULL;
443 union acpi_operand_object *next_object = NULL; 443 union acpi_operand_object *next_object = NULL;
444 union acpi_generic_state *state; 444 union acpi_generic_state *state;
445 acpi_native_uint i; 445 u32 i;
446 446
447 ACPI_FUNCTION_TRACE_PTR(ut_update_object_reference, object); 447 ACPI_FUNCTION_TRACE_PTR(ut_update_object_reference, object);
448 448
diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/utilities/uteval.c
index 05e61be267d5..352747e49c7a 100644
--- a/drivers/acpi/utilities/uteval.c
+++ b/drivers/acpi/utilities/uteval.c
@@ -97,7 +97,7 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
97 acpi_status status; 97 acpi_status status;
98 union acpi_operand_object *string_desc; 98 union acpi_operand_object *string_desc;
99 union acpi_operand_object *return_desc; 99 union acpi_operand_object *return_desc;
100 acpi_native_uint i; 100 u32 i;
101 101
102 ACPI_FUNCTION_TRACE(ut_osi_implementation); 102 ACPI_FUNCTION_TRACE(ut_osi_implementation);
103 103
@@ -217,7 +217,6 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
217 217
218 info->prefix_node = prefix_node; 218 info->prefix_node = prefix_node;
219 info->pathname = path; 219 info->pathname = path;
220 info->parameter_type = ACPI_PARAM_ARGS;
221 220
222 /* Evaluate the object/method */ 221 /* Evaluate the object/method */
223 222
@@ -514,7 +513,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node * device_node,
514 u32 count; 513 u32 count;
515 u32 size; 514 u32 size;
516 struct acpi_compatible_id_list *cid_list; 515 struct acpi_compatible_id_list *cid_list;
517 acpi_native_uint i; 516 u32 i;
518 517
519 ACPI_FUNCTION_TRACE(ut_execute_CID); 518 ACPI_FUNCTION_TRACE(ut_execute_CID);
520 519
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
index 1f057b71db1a..f34be6773556 100644
--- a/drivers/acpi/utilities/utmisc.c
+++ b/drivers/acpi/utilities/utmisc.c
@@ -64,7 +64,7 @@ ACPI_MODULE_NAME("utmisc")
64 ******************************************************************************/ 64 ******************************************************************************/
65const char *acpi_ut_validate_exception(acpi_status status) 65const char *acpi_ut_validate_exception(acpi_status status)
66{ 66{
67 acpi_status sub_status; 67 u32 sub_status;
68 const char *exception = NULL; 68 const char *exception = NULL;
69 69
70 ACPI_FUNCTION_ENTRY(); 70 ACPI_FUNCTION_ENTRY();
@@ -85,32 +85,28 @@ const char *acpi_ut_validate_exception(acpi_status status)
85 case AE_CODE_PROGRAMMER: 85 case AE_CODE_PROGRAMMER:
86 86
87 if (sub_status <= AE_CODE_PGM_MAX) { 87 if (sub_status <= AE_CODE_PGM_MAX) {
88 exception = 88 exception = acpi_gbl_exception_names_pgm[sub_status];
89 acpi_gbl_exception_names_pgm[sub_status - 1];
90 } 89 }
91 break; 90 break;
92 91
93 case AE_CODE_ACPI_TABLES: 92 case AE_CODE_ACPI_TABLES:
94 93
95 if (sub_status <= AE_CODE_TBL_MAX) { 94 if (sub_status <= AE_CODE_TBL_MAX) {
96 exception = 95 exception = acpi_gbl_exception_names_tbl[sub_status];
97 acpi_gbl_exception_names_tbl[sub_status - 1];
98 } 96 }
99 break; 97 break;
100 98
101 case AE_CODE_AML: 99 case AE_CODE_AML:
102 100
103 if (sub_status <= AE_CODE_AML_MAX) { 101 if (sub_status <= AE_CODE_AML_MAX) {
104 exception = 102 exception = acpi_gbl_exception_names_aml[sub_status];
105 acpi_gbl_exception_names_aml[sub_status - 1];
106 } 103 }
107 break; 104 break;
108 105
109 case AE_CODE_CONTROL: 106 case AE_CODE_CONTROL:
110 107
111 if (sub_status <= AE_CODE_CTRL_MAX) { 108 if (sub_status <= AE_CODE_CTRL_MAX) {
112 exception = 109 exception = acpi_gbl_exception_names_ctrl[sub_status];
113 acpi_gbl_exception_names_ctrl[sub_status - 1];
114 } 110 }
115 break; 111 break;
116 112
@@ -165,9 +161,9 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
165 161
166acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) 162acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
167{ 163{
168 acpi_native_uint i; 164 u32 i;
169 acpi_native_uint j; 165 u32 j;
170 acpi_native_uint k; 166 u32 k;
171 acpi_status status; 167 acpi_status status;
172 168
173 ACPI_FUNCTION_TRACE(ut_allocate_owner_id); 169 ACPI_FUNCTION_TRACE(ut_allocate_owner_id);
@@ -273,7 +269,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
273{ 269{
274 acpi_owner_id owner_id = *owner_id_ptr; 270 acpi_owner_id owner_id = *owner_id_ptr;
275 acpi_status status; 271 acpi_status status;
276 acpi_native_uint index; 272 u32 index;
277 u32 bit; 273 u32 bit;
278 274
279 ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id); 275 ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id);
@@ -593,7 +589,7 @@ acpi_ut_display_init_pathname(u8 type,
593 * 589 *
594 ******************************************************************************/ 590 ******************************************************************************/
595 591
596u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position) 592u8 acpi_ut_valid_acpi_char(char character, u32 position)
597{ 593{
598 594
599 if (!((character >= 'A' && character <= 'Z') || 595 if (!((character >= 'A' && character <= 'Z') ||
@@ -628,7 +624,7 @@ u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position)
628 624
629u8 acpi_ut_valid_acpi_name(u32 name) 625u8 acpi_ut_valid_acpi_name(u32 name)
630{ 626{
631 acpi_native_uint i; 627 u32 i;
632 628
633 ACPI_FUNCTION_ENTRY(); 629 ACPI_FUNCTION_ENTRY();
634 630
@@ -657,7 +653,7 @@ u8 acpi_ut_valid_acpi_name(u32 name)
657 653
658acpi_name acpi_ut_repair_name(char *name) 654acpi_name acpi_ut_repair_name(char *name)
659{ 655{
660 acpi_native_uint i; 656 u32 i;
661 char new_name[ACPI_NAME_SIZE]; 657 char new_name[ACPI_NAME_SIZE];
662 658
663 for (i = 0; i < ACPI_NAME_SIZE; i++) { 659 for (i = 0; i < ACPI_NAME_SIZE; i++) {
@@ -1024,7 +1020,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
1024 ******************************************************************************/ 1020 ******************************************************************************/
1025 1021
1026void ACPI_INTERNAL_VAR_XFACE 1022void ACPI_INTERNAL_VAR_XFACE
1027acpi_ut_error(char *module_name, u32 line_number, char *format, ...) 1023acpi_ut_error(const char *module_name, u32 line_number, const char *format, ...)
1028{ 1024{
1029 va_list args; 1025 va_list args;
1030 1026
@@ -1037,8 +1033,8 @@ acpi_ut_error(char *module_name, u32 line_number, char *format, ...)
1037} 1033}
1038 1034
1039void ACPI_INTERNAL_VAR_XFACE 1035void ACPI_INTERNAL_VAR_XFACE
1040acpi_ut_exception(char *module_name, 1036acpi_ut_exception(const char *module_name,
1041 u32 line_number, acpi_status status, char *format, ...) 1037 u32 line_number, acpi_status status, const char *format, ...)
1042{ 1038{
1043 va_list args; 1039 va_list args;
1044 1040
@@ -1054,7 +1050,8 @@ acpi_ut_exception(char *module_name,
1054EXPORT_SYMBOL(acpi_ut_exception); 1050EXPORT_SYMBOL(acpi_ut_exception);
1055 1051
1056void ACPI_INTERNAL_VAR_XFACE 1052void ACPI_INTERNAL_VAR_XFACE
1057acpi_ut_warning(char *module_name, u32 line_number, char *format, ...) 1053acpi_ut_warning(const char *module_name,
1054 u32 line_number, const char *format, ...)
1058{ 1055{
1059 va_list args; 1056 va_list args;
1060 1057
@@ -1067,7 +1064,7 @@ acpi_ut_warning(char *module_name, u32 line_number, char *format, ...)
1067} 1064}
1068 1065
1069void ACPI_INTERNAL_VAR_XFACE 1066void ACPI_INTERNAL_VAR_XFACE
1070acpi_ut_info(char *module_name, u32 line_number, char *format, ...) 1067acpi_ut_info(const char *module_name, u32 line_number, const char *format, ...)
1071{ 1068{
1072 va_list args; 1069 va_list args;
1073 1070
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c
index f7d602b1a894..7331dde9e1b3 100644
--- a/drivers/acpi/utilities/utmutex.c
+++ b/drivers/acpi/utilities/utmutex.c
@@ -218,7 +218,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
218 * the mutex ordering rule. This indicates a coding error somewhere in 218 * the mutex ordering rule. This indicates a coding error somewhere in
219 * the ACPI subsystem code. 219 * the ACPI subsystem code.
220 */ 220 */
221 for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) { 221 for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
222 if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) { 222 if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
223 if (i == mutex_id) { 223 if (i == mutex_id) {
224 ACPI_ERROR((AE_INFO, 224 ACPI_ERROR((AE_INFO,
@@ -315,7 +315,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
315 * ordering rule. This indicates a coding error somewhere in 315 * ordering rule. This indicates a coding error somewhere in
316 * the ACPI subsystem code. 316 * the ACPI subsystem code.
317 */ 317 */
318 for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) { 318 for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
319 if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) { 319 if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
320 if (i == mutex_id) { 320 if (i == mutex_id) {
321 continue; 321 continue;
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index e68466de8044..e25484495e65 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -83,7 +83,8 @@ acpi_ut_get_element_length(u8 object_type,
83 * 83 *
84 ******************************************************************************/ 84 ******************************************************************************/
85 85
86union acpi_operand_object *acpi_ut_create_internal_object_dbg(char *module_name, 86union acpi_operand_object *acpi_ut_create_internal_object_dbg(const char
87 *module_name,
87 u32 line_number, 88 u32 line_number,
88 u32 component_id, 89 u32 component_id,
89 acpi_object_type 90 acpi_object_type
@@ -175,8 +176,8 @@ union acpi_operand_object *acpi_ut_create_package_object(u32 count)
175 * Create the element array. Count+1 allows the array to be null 176 * Create the element array. Count+1 allows the array to be null
176 * terminated. 177 * terminated.
177 */ 178 */
178 package_elements = ACPI_ALLOCATE_ZEROED((acpi_size) 179 package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count +
179 (count + 1) * sizeof(void *)); 180 1) * sizeof(void *));
180 if (!package_elements) { 181 if (!package_elements) {
181 acpi_ut_remove_reference(package_desc); 182 acpi_ut_remove_reference(package_desc);
182 return_PTR(NULL); 183 return_PTR(NULL);
@@ -347,7 +348,7 @@ u8 acpi_ut_valid_internal_object(void *object)
347 * 348 *
348 ******************************************************************************/ 349 ******************************************************************************/
349 350
350void *acpi_ut_allocate_object_desc_dbg(char *module_name, 351void *acpi_ut_allocate_object_desc_dbg(const char *module_name,
351 u32 line_number, u32 component_id) 352 u32 line_number, u32 component_id)
352{ 353{
353 union acpi_operand_object *object; 354 union acpi_operand_object *object;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index d089c4519d45..64c889331f3b 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -631,6 +631,76 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
631 * device : video output device (LCD, CRT, ..) 631 * device : video output device (LCD, CRT, ..)
632 * 632 *
633 * Return Value: 633 * Return Value:
634 * Maximum brightness level
635 *
636 * Allocate and initialize device->brightness.
637 */
638
639static int
640acpi_video_init_brightness(struct acpi_video_device *device)
641{
642 union acpi_object *obj = NULL;
643 int i, max_level = 0, count = 0;
644 union acpi_object *o;
645 struct acpi_video_device_brightness *br = NULL;
646
647 if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
648 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
649 "LCD brightness level\n"));
650 goto out;
651 }
652
653 if (obj->package.count < 2)
654 goto out;
655
656 br = kzalloc(sizeof(*br), GFP_KERNEL);
657 if (!br) {
658 printk(KERN_ERR "can't allocate memory\n");
659 goto out;
660 }
661
662 br->levels = kmalloc(obj->package.count * sizeof *(br->levels),
663 GFP_KERNEL);
664 if (!br->levels)
665 goto out_free;
666
667 for (i = 0; i < obj->package.count; i++) {
668 o = (union acpi_object *)&obj->package.elements[i];
669 if (o->type != ACPI_TYPE_INTEGER) {
670 printk(KERN_ERR PREFIX "Invalid data\n");
671 continue;
672 }
673 br->levels[count] = (u32) o->integer.value;
674
675 if (br->levels[count] > max_level)
676 max_level = br->levels[count];
677 count++;
678 }
679
680 if (count < 2)
681 goto out_free_levels;
682
683 br->count = count;
684 device->brightness = br;
685 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "found %d brightness levels\n", count));
686 kfree(obj);
687 return max_level;
688
689out_free_levels:
690 kfree(br->levels);
691out_free:
692 kfree(br);
693out:
694 device->brightness = NULL;
695 kfree(obj);
696 return 0;
697}
698
699/*
700 * Arg:
701 * device : video output device (LCD, CRT, ..)
702 *
703 * Return Value:
634 * None 704 * None
635 * 705 *
636 * Find out all required AML methods defined under the output 706 * Find out all required AML methods defined under the output
@@ -640,10 +710,7 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
640static void acpi_video_device_find_cap(struct acpi_video_device *device) 710static void acpi_video_device_find_cap(struct acpi_video_device *device)
641{ 711{
642 acpi_handle h_dummy1; 712 acpi_handle h_dummy1;
643 int i;
644 u32 max_level = 0; 713 u32 max_level = 0;
645 union acpi_object *obj = NULL;
646 struct acpi_video_device_brightness *br = NULL;
647 714
648 715
649 memset(&device->cap, 0, sizeof(device->cap)); 716 memset(&device->cap, 0, sizeof(device->cap));
@@ -672,53 +739,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
672 device->cap._DSS = 1; 739 device->cap._DSS = 1;
673 } 740 }
674 741
675 if (ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) { 742 max_level = acpi_video_init_brightness(device);
676
677 if (obj->package.count >= 2) {
678 int count = 0;
679 union acpi_object *o;
680
681 br = kzalloc(sizeof(*br), GFP_KERNEL);
682 if (!br) {
683 printk(KERN_ERR "can't allocate memory\n");
684 } else {
685 br->levels = kmalloc(obj->package.count *
686 sizeof *(br->levels), GFP_KERNEL);
687 if (!br->levels)
688 goto out;
689
690 for (i = 0; i < obj->package.count; i++) {
691 o = (union acpi_object *)&obj->package.
692 elements[i];
693 if (o->type != ACPI_TYPE_INTEGER) {
694 printk(KERN_ERR PREFIX "Invalid data\n");
695 continue;
696 }
697 br->levels[count] = (u32) o->integer.value;
698
699 if (br->levels[count] > max_level)
700 max_level = br->levels[count];
701 count++;
702 }
703 out:
704 if (count < 2) {
705 kfree(br->levels);
706 kfree(br);
707 } else {
708 br->count = count;
709 device->brightness = br;
710 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
711 "found %d brightness levels\n",
712 count));
713 }
714 }
715 }
716
717 } else {
718 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available LCD brightness level\n"));
719 }
720
721 kfree(obj);
722 743
723 if (device->cap._BCL && device->cap._BCM && device->cap._BQC && max_level > 0){ 744 if (device->cap._BCL && device->cap._BCM && device->cap._BQC && max_level > 0){
724 int result; 745 int result;
@@ -1695,6 +1716,8 @@ static void
1695acpi_video_switch_brightness(struct acpi_video_device *device, int event) 1716acpi_video_switch_brightness(struct acpi_video_device *device, int event)
1696{ 1717{
1697 unsigned long level_current, level_next; 1718 unsigned long level_current, level_next;
1719 if (!device->brightness)
1720 return;
1698 acpi_video_device_lcd_get_level_current(device, &level_current); 1721 acpi_video_device_lcd_get_level_current(device, &level_current);
1699 level_next = acpi_video_get_next_level(device, level_current, event); 1722 level_next = acpi_video_get_next_level(device, level_current, event);
1700 acpi_video_device_lcd_set_level(device, level_next); 1723 acpi_video_device_lcd_set_level(device, level_next);
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index bc79df6e7cb0..a9e827356d06 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -16,10 +16,10 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/libata.h> 18#include <linux/libata.h>
19#include <linux/of_platform.h>
19 20
20#include <asm/types.h> 21#include <asm/types.h>
21#include <asm/prom.h> 22#include <asm/prom.h>
22#include <asm/of_platform.h>
23#include <asm/mpc52xx.h> 23#include <asm/mpc52xx.h>
24 24
25 25
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 911ec600fe71..3f940393d6c7 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -453,6 +453,8 @@ int platform_driver_register(struct platform_driver *drv)
453 drv->driver.suspend = platform_drv_suspend; 453 drv->driver.suspend = platform_drv_suspend;
454 if (drv->resume) 454 if (drv->resume)
455 drv->driver.resume = platform_drv_resume; 455 drv->driver.resume = platform_drv_resume;
456 if (drv->pm)
457 drv->driver.pm = &drv->pm->base;
456 return driver_register(&drv->driver); 458 return driver_register(&drv->driver);
457} 459}
458EXPORT_SYMBOL_GPL(platform_driver_register); 460EXPORT_SYMBOL_GPL(platform_driver_register);
@@ -560,7 +562,9 @@ static int platform_match(struct device *dev, struct device_driver *drv)
560 return (strncmp(pdev->name, drv->name, BUS_ID_SIZE) == 0); 562 return (strncmp(pdev->name, drv->name, BUS_ID_SIZE) == 0);
561} 563}
562 564
563static int platform_suspend(struct device *dev, pm_message_t mesg) 565#ifdef CONFIG_PM_SLEEP
566
567static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
564{ 568{
565 int ret = 0; 569 int ret = 0;
566 570
@@ -570,7 +574,7 @@ static int platform_suspend(struct device *dev, pm_message_t mesg)
570 return ret; 574 return ret;
571} 575}
572 576
573static int platform_suspend_late(struct device *dev, pm_message_t mesg) 577static int platform_legacy_suspend_late(struct device *dev, pm_message_t mesg)
574{ 578{
575 struct platform_driver *drv = to_platform_driver(dev->driver); 579 struct platform_driver *drv = to_platform_driver(dev->driver);
576 struct platform_device *pdev; 580 struct platform_device *pdev;
@@ -583,7 +587,7 @@ static int platform_suspend_late(struct device *dev, pm_message_t mesg)
583 return ret; 587 return ret;
584} 588}
585 589
586static int platform_resume_early(struct device *dev) 590static int platform_legacy_resume_early(struct device *dev)
587{ 591{
588 struct platform_driver *drv = to_platform_driver(dev->driver); 592 struct platform_driver *drv = to_platform_driver(dev->driver);
589 struct platform_device *pdev; 593 struct platform_device *pdev;
@@ -596,7 +600,7 @@ static int platform_resume_early(struct device *dev)
596 return ret; 600 return ret;
597} 601}
598 602
599static int platform_resume(struct device *dev) 603static int platform_legacy_resume(struct device *dev)
600{ 604{
601 int ret = 0; 605 int ret = 0;
602 606
@@ -606,15 +610,291 @@ static int platform_resume(struct device *dev)
606 return ret; 610 return ret;
607} 611}
608 612
613static int platform_pm_prepare(struct device *dev)
614{
615 struct device_driver *drv = dev->driver;
616 int ret = 0;
617
618 if (drv && drv->pm && drv->pm->prepare)
619 ret = drv->pm->prepare(dev);
620
621 return ret;
622}
623
624static void platform_pm_complete(struct device *dev)
625{
626 struct device_driver *drv = dev->driver;
627
628 if (drv && drv->pm && drv->pm->complete)
629 drv->pm->complete(dev);
630}
631
632#ifdef CONFIG_SUSPEND
633
634static int platform_pm_suspend(struct device *dev)
635{
636 struct device_driver *drv = dev->driver;
637 int ret = 0;
638
639 if (drv && drv->pm) {
640 if (drv->pm->suspend)
641 ret = drv->pm->suspend(dev);
642 } else {
643 ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
644 }
645
646 return ret;
647}
648
649static int platform_pm_suspend_noirq(struct device *dev)
650{
651 struct platform_driver *pdrv;
652 int ret = 0;
653
654 if (!dev->driver)
655 return 0;
656
657 pdrv = to_platform_driver(dev->driver);
658 if (pdrv->pm) {
659 if (pdrv->pm->suspend_noirq)
660 ret = pdrv->pm->suspend_noirq(dev);
661 } else {
662 ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND);
663 }
664
665 return ret;
666}
667
668static int platform_pm_resume(struct device *dev)
669{
670 struct device_driver *drv = dev->driver;
671 int ret = 0;
672
673 if (drv && drv->pm) {
674 if (drv->pm->resume)
675 ret = drv->pm->resume(dev);
676 } else {
677 ret = platform_legacy_resume(dev);
678 }
679
680 return ret;
681}
682
683static int platform_pm_resume_noirq(struct device *dev)
684{
685 struct platform_driver *pdrv;
686 int ret = 0;
687
688 if (!dev->driver)
689 return 0;
690
691 pdrv = to_platform_driver(dev->driver);
692 if (pdrv->pm) {
693 if (pdrv->pm->resume_noirq)
694 ret = pdrv->pm->resume_noirq(dev);
695 } else {
696 ret = platform_legacy_resume_early(dev);
697 }
698
699 return ret;
700}
701
702#else /* !CONFIG_SUSPEND */
703
704#define platform_pm_suspend NULL
705#define platform_pm_resume NULL
706#define platform_pm_suspend_noirq NULL
707#define platform_pm_resume_noirq NULL
708
709#endif /* !CONFIG_SUSPEND */
710
711#ifdef CONFIG_HIBERNATION
712
713static int platform_pm_freeze(struct device *dev)
714{
715 struct device_driver *drv = dev->driver;
716 int ret = 0;
717
718 if (!drv)
719 return 0;
720
721 if (drv->pm) {
722 if (drv->pm->freeze)
723 ret = drv->pm->freeze(dev);
724 } else {
725 ret = platform_legacy_suspend(dev, PMSG_FREEZE);
726 }
727
728 return ret;
729}
730
731static int platform_pm_freeze_noirq(struct device *dev)
732{
733 struct platform_driver *pdrv;
734 int ret = 0;
735
736 if (!dev->driver)
737 return 0;
738
739 pdrv = to_platform_driver(dev->driver);
740 if (pdrv->pm) {
741 if (pdrv->pm->freeze_noirq)
742 ret = pdrv->pm->freeze_noirq(dev);
743 } else {
744 ret = platform_legacy_suspend_late(dev, PMSG_FREEZE);
745 }
746
747 return ret;
748}
749
750static int platform_pm_thaw(struct device *dev)
751{
752 struct device_driver *drv = dev->driver;
753 int ret = 0;
754
755 if (drv && drv->pm) {
756 if (drv->pm->thaw)
757 ret = drv->pm->thaw(dev);
758 } else {
759 ret = platform_legacy_resume(dev);
760 }
761
762 return ret;
763}
764
765static int platform_pm_thaw_noirq(struct device *dev)
766{
767 struct platform_driver *pdrv;
768 int ret = 0;
769
770 if (!dev->driver)
771 return 0;
772
773 pdrv = to_platform_driver(dev->driver);
774 if (pdrv->pm) {
775 if (pdrv->pm->thaw_noirq)
776 ret = pdrv->pm->thaw_noirq(dev);
777 } else {
778 ret = platform_legacy_resume_early(dev);
779 }
780
781 return ret;
782}
783
784static int platform_pm_poweroff(struct device *dev)
785{
786 struct device_driver *drv = dev->driver;
787 int ret = 0;
788
789 if (drv && drv->pm) {
790 if (drv->pm->poweroff)
791 ret = drv->pm->poweroff(dev);
792 } else {
793 ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
794 }
795
796 return ret;
797}
798
799static int platform_pm_poweroff_noirq(struct device *dev)
800{
801 struct platform_driver *pdrv;
802 int ret = 0;
803
804 if (!dev->driver)
805 return 0;
806
807 pdrv = to_platform_driver(dev->driver);
808 if (pdrv->pm) {
809 if (pdrv->pm->poweroff_noirq)
810 ret = pdrv->pm->poweroff_noirq(dev);
811 } else {
812 ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE);
813 }
814
815 return ret;
816}
817
818static int platform_pm_restore(struct device *dev)
819{
820 struct device_driver *drv = dev->driver;
821 int ret = 0;
822
823 if (drv && drv->pm) {
824 if (drv->pm->restore)
825 ret = drv->pm->restore(dev);
826 } else {
827 ret = platform_legacy_resume(dev);
828 }
829
830 return ret;
831}
832
833static int platform_pm_restore_noirq(struct device *dev)
834{
835 struct platform_driver *pdrv;
836 int ret = 0;
837
838 if (!dev->driver)
839 return 0;
840
841 pdrv = to_platform_driver(dev->driver);
842 if (pdrv->pm) {
843 if (pdrv->pm->restore_noirq)
844 ret = pdrv->pm->restore_noirq(dev);
845 } else {
846 ret = platform_legacy_resume_early(dev);
847 }
848
849 return ret;
850}
851
852#else /* !CONFIG_HIBERNATION */
853
854#define platform_pm_freeze NULL
855#define platform_pm_thaw NULL
856#define platform_pm_poweroff NULL
857#define platform_pm_restore NULL
858#define platform_pm_freeze_noirq NULL
859#define platform_pm_thaw_noirq NULL
860#define platform_pm_poweroff_noirq NULL
861#define platform_pm_restore_noirq NULL
862
863#endif /* !CONFIG_HIBERNATION */
864
865struct pm_ext_ops platform_pm_ops = {
866 .base = {
867 .prepare = platform_pm_prepare,
868 .complete = platform_pm_complete,
869 .suspend = platform_pm_suspend,
870 .resume = platform_pm_resume,
871 .freeze = platform_pm_freeze,
872 .thaw = platform_pm_thaw,
873 .poweroff = platform_pm_poweroff,
874 .restore = platform_pm_restore,
875 },
876 .suspend_noirq = platform_pm_suspend_noirq,
877 .resume_noirq = platform_pm_resume_noirq,
878 .freeze_noirq = platform_pm_freeze_noirq,
879 .thaw_noirq = platform_pm_thaw_noirq,
880 .poweroff_noirq = platform_pm_poweroff_noirq,
881 .restore_noirq = platform_pm_restore_noirq,
882};
883
884#define PLATFORM_PM_OPS_PTR &platform_pm_ops
885
886#else /* !CONFIG_PM_SLEEP */
887
888#define PLATFORM_PM_OPS_PTR NULL
889
890#endif /* !CONFIG_PM_SLEEP */
891
609struct bus_type platform_bus_type = { 892struct bus_type platform_bus_type = {
610 .name = "platform", 893 .name = "platform",
611 .dev_attrs = platform_dev_attrs, 894 .dev_attrs = platform_dev_attrs,
612 .match = platform_match, 895 .match = platform_match,
613 .uevent = platform_uevent, 896 .uevent = platform_uevent,
614 .suspend = platform_suspend, 897 .pm = PLATFORM_PM_OPS_PTR,
615 .suspend_late = platform_suspend_late,
616 .resume_early = platform_resume_early,
617 .resume = platform_resume,
618}; 898};
619EXPORT_SYMBOL_GPL(platform_bus_type); 899EXPORT_SYMBOL_GPL(platform_bus_type);
620 900
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 45cc3d9eacb8..3250c5257b74 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -12,11 +12,9 @@
12 * and add it to the list of power-controlled devices. sysfs entries for 12 * and add it to the list of power-controlled devices. sysfs entries for
13 * controlling device power management will also be added. 13 * controlling device power management will also be added.
14 * 14 *
15 * A different set of lists than the global subsystem list are used to 15 * A separate list is used for keeping track of power info, because the power
16 * keep track of power info because we use different lists to hold 16 * domain dependencies may differ from the ancestral dependencies that the
17 * devices based on what stage of the power management process they 17 * subsystem list maintains.
18 * are in. The power domain dependencies may also differ from the
19 * ancestral dependencies that the subsystem list maintains.
20 */ 18 */
21 19
22#include <linux/device.h> 20#include <linux/device.h>
@@ -30,31 +28,40 @@
30#include "power.h" 28#include "power.h"
31 29
32/* 30/*
33 * The entries in the dpm_active list are in a depth first order, simply 31 * The entries in the dpm_list list are in a depth first order, simply
34 * because children are guaranteed to be discovered after parents, and 32 * because children are guaranteed to be discovered after parents, and
35 * are inserted at the back of the list on discovery. 33 * are inserted at the back of the list on discovery.
36 * 34 *
37 * All the other lists are kept in the same order, for consistency.
38 * However the lists aren't always traversed in the same order.
39 * Semaphores must be acquired from the top (i.e., front) down
40 * and released in the opposite order. Devices must be suspended
41 * from the bottom (i.e., end) up and resumed in the opposite order.
42 * That way no parent will be suspended while it still has an active
43 * child.
44 *
45 * Since device_pm_add() may be called with a device semaphore held, 35 * Since device_pm_add() may be called with a device semaphore held,
46 * we must never try to acquire a device semaphore while holding 36 * we must never try to acquire a device semaphore while holding
47 * dpm_list_mutex. 37 * dpm_list_mutex.
48 */ 38 */
49 39
50LIST_HEAD(dpm_active); 40LIST_HEAD(dpm_list);
51static LIST_HEAD(dpm_off);
52static LIST_HEAD(dpm_off_irq);
53 41
54static DEFINE_MUTEX(dpm_list_mtx); 42static DEFINE_MUTEX(dpm_list_mtx);
55 43
56/* 'true' if all devices have been suspended, protected by dpm_list_mtx */ 44/*
57static bool all_sleeping; 45 * Set once the preparation of devices for a PM transition has started, reset
46 * before starting to resume devices. Protected by dpm_list_mtx.
47 */
48static bool transition_started;
49
50/**
51 * device_pm_lock - lock the list of active devices used by the PM core
52 */
53void device_pm_lock(void)
54{
55 mutex_lock(&dpm_list_mtx);
56}
57
58/**
59 * device_pm_unlock - unlock the list of active devices used by the PM core
60 */
61void device_pm_unlock(void)
62{
63 mutex_unlock(&dpm_list_mtx);
64}
58 65
59/** 66/**
60 * device_pm_add - add a device to the list of active devices 67 * device_pm_add - add a device to the list of active devices
@@ -68,17 +75,25 @@ int device_pm_add(struct device *dev)
68 dev->bus ? dev->bus->name : "No Bus", 75 dev->bus ? dev->bus->name : "No Bus",
69 kobject_name(&dev->kobj)); 76 kobject_name(&dev->kobj));
70 mutex_lock(&dpm_list_mtx); 77 mutex_lock(&dpm_list_mtx);
71 if ((dev->parent && dev->parent->power.sleeping) || all_sleeping) { 78 if (dev->parent) {
72 if (dev->parent->power.sleeping) 79 if (dev->parent->power.status >= DPM_SUSPENDING) {
73 dev_warn(dev, "parent %s is sleeping\n", 80 dev_warn(dev, "parent %s is sleeping, will not add\n",
74 dev->parent->bus_id); 81 dev->parent->bus_id);
75 else 82 WARN_ON(true);
76 dev_warn(dev, "all devices are sleeping\n"); 83 }
84 } else if (transition_started) {
85 /*
86 * We refuse to register parentless devices while a PM
87 * transition is in progress in order to avoid leaving them
88 * unhandled down the road
89 */
77 WARN_ON(true); 90 WARN_ON(true);
78 } 91 }
79 error = dpm_sysfs_add(dev); 92 error = dpm_sysfs_add(dev);
80 if (!error) 93 if (!error) {
81 list_add_tail(&dev->power.entry, &dpm_active); 94 dev->power.status = DPM_ON;
95 list_add_tail(&dev->power.entry, &dpm_list);
96 }
82 mutex_unlock(&dpm_list_mtx); 97 mutex_unlock(&dpm_list_mtx);
83 return error; 98 return error;
84} 99}
@@ -100,73 +115,243 @@ void device_pm_remove(struct device *dev)
100 mutex_unlock(&dpm_list_mtx); 115 mutex_unlock(&dpm_list_mtx);
101} 116}
102 117
118/**
119 * pm_op - execute the PM operation appropiate for given PM event
120 * @dev: Device.
121 * @ops: PM operations to choose from.
122 * @state: PM transition of the system being carried out.
123 */
124static int pm_op(struct device *dev, struct pm_ops *ops, pm_message_t state)
125{
126 int error = 0;
127
128 switch (state.event) {
129#ifdef CONFIG_SUSPEND
130 case PM_EVENT_SUSPEND:
131 if (ops->suspend) {
132 error = ops->suspend(dev);
133 suspend_report_result(ops->suspend, error);
134 }
135 break;
136 case PM_EVENT_RESUME:
137 if (ops->resume) {
138 error = ops->resume(dev);
139 suspend_report_result(ops->resume, error);
140 }
141 break;
142#endif /* CONFIG_SUSPEND */
143#ifdef CONFIG_HIBERNATION
144 case PM_EVENT_FREEZE:
145 case PM_EVENT_QUIESCE:
146 if (ops->freeze) {
147 error = ops->freeze(dev);
148 suspend_report_result(ops->freeze, error);
149 }
150 break;
151 case PM_EVENT_HIBERNATE:
152 if (ops->poweroff) {
153 error = ops->poweroff(dev);
154 suspend_report_result(ops->poweroff, error);
155 }
156 break;
157 case PM_EVENT_THAW:
158 case PM_EVENT_RECOVER:
159 if (ops->thaw) {
160 error = ops->thaw(dev);
161 suspend_report_result(ops->thaw, error);
162 }
163 break;
164 case PM_EVENT_RESTORE:
165 if (ops->restore) {
166 error = ops->restore(dev);
167 suspend_report_result(ops->restore, error);
168 }
169 break;
170#endif /* CONFIG_HIBERNATION */
171 default:
172 error = -EINVAL;
173 }
174 return error;
175}
176
177/**
178 * pm_noirq_op - execute the PM operation appropiate for given PM event
179 * @dev: Device.
180 * @ops: PM operations to choose from.
181 * @state: PM transition of the system being carried out.
182 *
183 * The operation is executed with interrupts disabled by the only remaining
184 * functional CPU in the system.
185 */
186static int pm_noirq_op(struct device *dev, struct pm_ext_ops *ops,
187 pm_message_t state)
188{
189 int error = 0;
190
191 switch (state.event) {
192#ifdef CONFIG_SUSPEND
193 case PM_EVENT_SUSPEND:
194 if (ops->suspend_noirq) {
195 error = ops->suspend_noirq(dev);
196 suspend_report_result(ops->suspend_noirq, error);
197 }
198 break;
199 case PM_EVENT_RESUME:
200 if (ops->resume_noirq) {
201 error = ops->resume_noirq(dev);
202 suspend_report_result(ops->resume_noirq, error);
203 }
204 break;
205#endif /* CONFIG_SUSPEND */
206#ifdef CONFIG_HIBERNATION
207 case PM_EVENT_FREEZE:
208 case PM_EVENT_QUIESCE:
209 if (ops->freeze_noirq) {
210 error = ops->freeze_noirq(dev);
211 suspend_report_result(ops->freeze_noirq, error);
212 }
213 break;
214 case PM_EVENT_HIBERNATE:
215 if (ops->poweroff_noirq) {
216 error = ops->poweroff_noirq(dev);
217 suspend_report_result(ops->poweroff_noirq, error);
218 }
219 break;
220 case PM_EVENT_THAW:
221 case PM_EVENT_RECOVER:
222 if (ops->thaw_noirq) {
223 error = ops->thaw_noirq(dev);
224 suspend_report_result(ops->thaw_noirq, error);
225 }
226 break;
227 case PM_EVENT_RESTORE:
228 if (ops->restore_noirq) {
229 error = ops->restore_noirq(dev);
230 suspend_report_result(ops->restore_noirq, error);
231 }
232 break;
233#endif /* CONFIG_HIBERNATION */
234 default:
235 error = -EINVAL;
236 }
237 return error;
238}
239
240static char *pm_verb(int event)
241{
242 switch (event) {
243 case PM_EVENT_SUSPEND:
244 return "suspend";
245 case PM_EVENT_RESUME:
246 return "resume";
247 case PM_EVENT_FREEZE:
248 return "freeze";
249 case PM_EVENT_QUIESCE:
250 return "quiesce";
251 case PM_EVENT_HIBERNATE:
252 return "hibernate";
253 case PM_EVENT_THAW:
254 return "thaw";
255 case PM_EVENT_RESTORE:
256 return "restore";
257 case PM_EVENT_RECOVER:
258 return "recover";
259 default:
260 return "(unknown PM event)";
261 }
262}
263
264static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
265{
266 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
267 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
268 ", may wakeup" : "");
269}
270
271static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
272 int error)
273{
274 printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
275 kobject_name(&dev->kobj), pm_verb(state.event), info, error);
276}
277
103/*------------------------- Resume routines -------------------------*/ 278/*------------------------- Resume routines -------------------------*/
104 279
105/** 280/**
106 * resume_device_early - Power on one device (early resume). 281 * resume_device_noirq - Power on one device (early resume).
107 * @dev: Device. 282 * @dev: Device.
283 * @state: PM transition of the system being carried out.
108 * 284 *
109 * Must be called with interrupts disabled. 285 * Must be called with interrupts disabled.
110 */ 286 */
111static int resume_device_early(struct device *dev) 287static int resume_device_noirq(struct device *dev, pm_message_t state)
112{ 288{
113 int error = 0; 289 int error = 0;
114 290
115 TRACE_DEVICE(dev); 291 TRACE_DEVICE(dev);
116 TRACE_RESUME(0); 292 TRACE_RESUME(0);
117 293
118 if (dev->bus && dev->bus->resume_early) { 294 if (!dev->bus)
119 dev_dbg(dev, "EARLY resume\n"); 295 goto End;
296
297 if (dev->bus->pm) {
298 pm_dev_dbg(dev, state, "EARLY ");
299 error = pm_noirq_op(dev, dev->bus->pm, state);
300 } else if (dev->bus->resume_early) {
301 pm_dev_dbg(dev, state, "legacy EARLY ");
120 error = dev->bus->resume_early(dev); 302 error = dev->bus->resume_early(dev);
121 } 303 }
122 304 End:
123 TRACE_RESUME(error); 305 TRACE_RESUME(error);
124 return error; 306 return error;
125} 307}
126 308
127/** 309/**
128 * dpm_power_up - Power on all regular (non-sysdev) devices. 310 * dpm_power_up - Power on all regular (non-sysdev) devices.
311 * @state: PM transition of the system being carried out.
129 * 312 *
130 * Walk the dpm_off_irq list and power each device up. This 313 * Execute the appropriate "noirq resume" callback for all devices marked
131 * is used for devices that required they be powered down with 314 * as DPM_OFF_IRQ.
132 * interrupts disabled. As devices are powered on, they are moved
133 * to the dpm_off list.
134 * 315 *
135 * Must be called with interrupts disabled and only one CPU running. 316 * Must be called with interrupts disabled and only one CPU running.
136 */ 317 */
137static void dpm_power_up(void) 318static void dpm_power_up(pm_message_t state)
138{ 319{
320 struct device *dev;
139 321
140 while (!list_empty(&dpm_off_irq)) { 322 list_for_each_entry(dev, &dpm_list, power.entry)
141 struct list_head *entry = dpm_off_irq.next; 323 if (dev->power.status > DPM_OFF) {
142 struct device *dev = to_device(entry); 324 int error;
143 325
144 list_move_tail(entry, &dpm_off); 326 dev->power.status = DPM_OFF;
145 resume_device_early(dev); 327 error = resume_device_noirq(dev, state);
146 } 328 if (error)
329 pm_dev_err(dev, state, " early", error);
330 }
147} 331}
148 332
149/** 333/**
150 * device_power_up - Turn on all devices that need special attention. 334 * device_power_up - Turn on all devices that need special attention.
335 * @state: PM transition of the system being carried out.
151 * 336 *
152 * Power on system devices, then devices that required we shut them down 337 * Power on system devices, then devices that required we shut them down
153 * with interrupts disabled. 338 * with interrupts disabled.
154 * 339 *
155 * Must be called with interrupts disabled. 340 * Must be called with interrupts disabled.
156 */ 341 */
157void device_power_up(void) 342void device_power_up(pm_message_t state)
158{ 343{
159 sysdev_resume(); 344 sysdev_resume();
160 dpm_power_up(); 345 dpm_power_up(state);
161} 346}
162EXPORT_SYMBOL_GPL(device_power_up); 347EXPORT_SYMBOL_GPL(device_power_up);
163 348
164/** 349/**
165 * resume_device - Restore state for one device. 350 * resume_device - Restore state for one device.
166 * @dev: Device. 351 * @dev: Device.
167 * 352 * @state: PM transition of the system being carried out.
168 */ 353 */
169static int resume_device(struct device *dev) 354static int resume_device(struct device *dev, pm_message_t state)
170{ 355{
171 int error = 0; 356 int error = 0;
172 357
@@ -175,21 +360,40 @@ static int resume_device(struct device *dev)
175 360
176 down(&dev->sem); 361 down(&dev->sem);
177 362
178 if (dev->bus && dev->bus->resume) { 363 if (dev->bus) {
179 dev_dbg(dev,"resuming\n"); 364 if (dev->bus->pm) {
180 error = dev->bus->resume(dev); 365 pm_dev_dbg(dev, state, "");
366 error = pm_op(dev, &dev->bus->pm->base, state);
367 } else if (dev->bus->resume) {
368 pm_dev_dbg(dev, state, "legacy ");
369 error = dev->bus->resume(dev);
370 }
371 if (error)
372 goto End;
181 } 373 }
182 374
183 if (!error && dev->type && dev->type->resume) { 375 if (dev->type) {
184 dev_dbg(dev,"resuming\n"); 376 if (dev->type->pm) {
185 error = dev->type->resume(dev); 377 pm_dev_dbg(dev, state, "type ");
378 error = pm_op(dev, dev->type->pm, state);
379 } else if (dev->type->resume) {
380 pm_dev_dbg(dev, state, "legacy type ");
381 error = dev->type->resume(dev);
382 }
383 if (error)
384 goto End;
186 } 385 }
187 386
188 if (!error && dev->class && dev->class->resume) { 387 if (dev->class) {
189 dev_dbg(dev,"class resume\n"); 388 if (dev->class->pm) {
190 error = dev->class->resume(dev); 389 pm_dev_dbg(dev, state, "class ");
390 error = pm_op(dev, dev->class->pm, state);
391 } else if (dev->class->resume) {
392 pm_dev_dbg(dev, state, "legacy class ");
393 error = dev->class->resume(dev);
394 }
191 } 395 }
192 396 End:
193 up(&dev->sem); 397 up(&dev->sem);
194 398
195 TRACE_RESUME(error); 399 TRACE_RESUME(error);
@@ -198,78 +402,161 @@ static int resume_device(struct device *dev)
198 402
199/** 403/**
200 * dpm_resume - Resume every device. 404 * dpm_resume - Resume every device.
405 * @state: PM transition of the system being carried out.
201 * 406 *
202 * Resume the devices that have either not gone through 407 * Execute the appropriate "resume" callback for all devices the status of
203 * the late suspend, or that did go through it but also 408 * which indicates that they are inactive.
204 * went through the early resume. 409 */
410static void dpm_resume(pm_message_t state)
411{
412 struct list_head list;
413
414 INIT_LIST_HEAD(&list);
415 mutex_lock(&dpm_list_mtx);
416 transition_started = false;
417 while (!list_empty(&dpm_list)) {
418 struct device *dev = to_device(dpm_list.next);
419
420 get_device(dev);
421 if (dev->power.status >= DPM_OFF) {
422 int error;
423
424 dev->power.status = DPM_RESUMING;
425 mutex_unlock(&dpm_list_mtx);
426
427 error = resume_device(dev, state);
428
429 mutex_lock(&dpm_list_mtx);
430 if (error)
431 pm_dev_err(dev, state, "", error);
432 } else if (dev->power.status == DPM_SUSPENDING) {
433 /* Allow new children of the device to be registered */
434 dev->power.status = DPM_RESUMING;
435 }
436 if (!list_empty(&dev->power.entry))
437 list_move_tail(&dev->power.entry, &list);
438 put_device(dev);
439 }
440 list_splice(&list, &dpm_list);
441 mutex_unlock(&dpm_list_mtx);
442}
443
444/**
445 * complete_device - Complete a PM transition for given device
446 * @dev: Device.
447 * @state: PM transition of the system being carried out.
448 */
449static void complete_device(struct device *dev, pm_message_t state)
450{
451 down(&dev->sem);
452
453 if (dev->class && dev->class->pm && dev->class->pm->complete) {
454 pm_dev_dbg(dev, state, "completing class ");
455 dev->class->pm->complete(dev);
456 }
457
458 if (dev->type && dev->type->pm && dev->type->pm->complete) {
459 pm_dev_dbg(dev, state, "completing type ");
460 dev->type->pm->complete(dev);
461 }
462
463 if (dev->bus && dev->bus->pm && dev->bus->pm->base.complete) {
464 pm_dev_dbg(dev, state, "completing ");
465 dev->bus->pm->base.complete(dev);
466 }
467
468 up(&dev->sem);
469}
470
471/**
472 * dpm_complete - Complete a PM transition for all devices.
473 * @state: PM transition of the system being carried out.
205 * 474 *
206 * Take devices from the dpm_off_list, resume them, 475 * Execute the ->complete() callbacks for all devices that are not marked
207 * and put them on the dpm_locked list. 476 * as DPM_ON.
208 */ 477 */
209static void dpm_resume(void) 478static void dpm_complete(pm_message_t state)
210{ 479{
480 struct list_head list;
481
482 INIT_LIST_HEAD(&list);
211 mutex_lock(&dpm_list_mtx); 483 mutex_lock(&dpm_list_mtx);
212 all_sleeping = false; 484 while (!list_empty(&dpm_list)) {
213 while(!list_empty(&dpm_off)) { 485 struct device *dev = to_device(dpm_list.prev);
214 struct list_head *entry = dpm_off.next;
215 struct device *dev = to_device(entry);
216 486
217 list_move_tail(entry, &dpm_active); 487 get_device(dev);
218 dev->power.sleeping = false; 488 if (dev->power.status > DPM_ON) {
219 mutex_unlock(&dpm_list_mtx); 489 dev->power.status = DPM_ON;
220 resume_device(dev); 490 mutex_unlock(&dpm_list_mtx);
221 mutex_lock(&dpm_list_mtx); 491
492 complete_device(dev, state);
493
494 mutex_lock(&dpm_list_mtx);
495 }
496 if (!list_empty(&dev->power.entry))
497 list_move(&dev->power.entry, &list);
498 put_device(dev);
222 } 499 }
500 list_splice(&list, &dpm_list);
223 mutex_unlock(&dpm_list_mtx); 501 mutex_unlock(&dpm_list_mtx);
224} 502}
225 503
226/** 504/**
227 * device_resume - Restore state of each device in system. 505 * device_resume - Restore state of each device in system.
506 * @state: PM transition of the system being carried out.
228 * 507 *
229 * Resume all the devices, unlock them all, and allow new 508 * Resume all the devices, unlock them all, and allow new
230 * devices to be registered once again. 509 * devices to be registered once again.
231 */ 510 */
232void device_resume(void) 511void device_resume(pm_message_t state)
233{ 512{
234 might_sleep(); 513 might_sleep();
235 dpm_resume(); 514 dpm_resume(state);
515 dpm_complete(state);
236} 516}
237EXPORT_SYMBOL_GPL(device_resume); 517EXPORT_SYMBOL_GPL(device_resume);
238 518
239 519
240/*------------------------- Suspend routines -------------------------*/ 520/*------------------------- Suspend routines -------------------------*/
241 521
242static inline char *suspend_verb(u32 event) 522/**
523 * resume_event - return a PM message representing the resume event
524 * corresponding to given sleep state.
525 * @sleep_state: PM message representing a sleep state.
526 */
527static pm_message_t resume_event(pm_message_t sleep_state)
243{ 528{
244 switch (event) { 529 switch (sleep_state.event) {
245 case PM_EVENT_SUSPEND: return "suspend"; 530 case PM_EVENT_SUSPEND:
246 case PM_EVENT_FREEZE: return "freeze"; 531 return PMSG_RESUME;
247 case PM_EVENT_PRETHAW: return "prethaw"; 532 case PM_EVENT_FREEZE:
248 default: return "(unknown suspend event)"; 533 case PM_EVENT_QUIESCE:
534 return PMSG_RECOVER;
535 case PM_EVENT_HIBERNATE:
536 return PMSG_RESTORE;
249 } 537 }
250} 538 return PMSG_ON;
251
252static void
253suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
254{
255 dev_dbg(dev, "%s%s%s\n", info, suspend_verb(state.event),
256 ((state.event == PM_EVENT_SUSPEND) && device_may_wakeup(dev)) ?
257 ", may wakeup" : "");
258} 539}
259 540
260/** 541/**
261 * suspend_device_late - Shut down one device (late suspend). 542 * suspend_device_noirq - Shut down one device (late suspend).
262 * @dev: Device. 543 * @dev: Device.
263 * @state: Power state device is entering. 544 * @state: PM transition of the system being carried out.
264 * 545 *
265 * This is called with interrupts off and only a single CPU running. 546 * This is called with interrupts off and only a single CPU running.
266 */ 547 */
267static int suspend_device_late(struct device *dev, pm_message_t state) 548static int suspend_device_noirq(struct device *dev, pm_message_t state)
268{ 549{
269 int error = 0; 550 int error = 0;
270 551
271 if (dev->bus && dev->bus->suspend_late) { 552 if (!dev->bus)
272 suspend_device_dbg(dev, state, "LATE "); 553 return 0;
554
555 if (dev->bus->pm) {
556 pm_dev_dbg(dev, state, "LATE ");
557 error = pm_noirq_op(dev, dev->bus->pm, state);
558 } else if (dev->bus->suspend_late) {
559 pm_dev_dbg(dev, state, "legacy LATE ");
273 error = dev->bus->suspend_late(dev, state); 560 error = dev->bus->suspend_late(dev, state);
274 suspend_report_result(dev->bus->suspend_late, error); 561 suspend_report_result(dev->bus->suspend_late, error);
275 } 562 }
@@ -278,37 +565,30 @@ static int suspend_device_late(struct device *dev, pm_message_t state)
278 565
279/** 566/**
280 * device_power_down - Shut down special devices. 567 * device_power_down - Shut down special devices.
281 * @state: Power state to enter. 568 * @state: PM transition of the system being carried out.
282 * 569 *
283 * Power down devices that require interrupts to be disabled 570 * Power down devices that require interrupts to be disabled.
284 * and move them from the dpm_off list to the dpm_off_irq list.
285 * Then power down system devices. 571 * Then power down system devices.
286 * 572 *
287 * Must be called with interrupts disabled and only one CPU running. 573 * Must be called with interrupts disabled and only one CPU running.
288 */ 574 */
289int device_power_down(pm_message_t state) 575int device_power_down(pm_message_t state)
290{ 576{
577 struct device *dev;
291 int error = 0; 578 int error = 0;
292 579
293 while (!list_empty(&dpm_off)) { 580 list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
294 struct list_head *entry = dpm_off.prev; 581 error = suspend_device_noirq(dev, state);
295 struct device *dev = to_device(entry);
296
297 error = suspend_device_late(dev, state);
298 if (error) { 582 if (error) {
299 printk(KERN_ERR "Could not power down device %s: " 583 pm_dev_err(dev, state, " late", error);
300 "error %d\n",
301 kobject_name(&dev->kobj), error);
302 break; 584 break;
303 } 585 }
304 if (!list_empty(&dev->power.entry)) 586 dev->power.status = DPM_OFF_IRQ;
305 list_move(&dev->power.entry, &dpm_off_irq);
306 } 587 }
307
308 if (!error) 588 if (!error)
309 error = sysdev_suspend(state); 589 error = sysdev_suspend(state);
310 if (error) 590 if (error)
311 dpm_power_up(); 591 dpm_power_up(resume_event(state));
312 return error; 592 return error;
313} 593}
314EXPORT_SYMBOL_GPL(device_power_down); 594EXPORT_SYMBOL_GPL(device_power_down);
@@ -316,7 +596,7 @@ EXPORT_SYMBOL_GPL(device_power_down);
316/** 596/**
317 * suspend_device - Save state of one device. 597 * suspend_device - Save state of one device.
318 * @dev: Device. 598 * @dev: Device.
319 * @state: Power state device is entering. 599 * @state: PM transition of the system being carried out.
320 */ 600 */
321static int suspend_device(struct device *dev, pm_message_t state) 601static int suspend_device(struct device *dev, pm_message_t state)
322{ 602{
@@ -324,24 +604,43 @@ static int suspend_device(struct device *dev, pm_message_t state)
324 604
325 down(&dev->sem); 605 down(&dev->sem);
326 606
327 if (dev->class && dev->class->suspend) { 607 if (dev->class) {
328 suspend_device_dbg(dev, state, "class "); 608 if (dev->class->pm) {
329 error = dev->class->suspend(dev, state); 609 pm_dev_dbg(dev, state, "class ");
330 suspend_report_result(dev->class->suspend, error); 610 error = pm_op(dev, dev->class->pm, state);
611 } else if (dev->class->suspend) {
612 pm_dev_dbg(dev, state, "legacy class ");
613 error = dev->class->suspend(dev, state);
614 suspend_report_result(dev->class->suspend, error);
615 }
616 if (error)
617 goto End;
331 } 618 }
332 619
333 if (!error && dev->type && dev->type->suspend) { 620 if (dev->type) {
334 suspend_device_dbg(dev, state, "type "); 621 if (dev->type->pm) {
335 error = dev->type->suspend(dev, state); 622 pm_dev_dbg(dev, state, "type ");
336 suspend_report_result(dev->type->suspend, error); 623 error = pm_op(dev, dev->type->pm, state);
624 } else if (dev->type->suspend) {
625 pm_dev_dbg(dev, state, "legacy type ");
626 error = dev->type->suspend(dev, state);
627 suspend_report_result(dev->type->suspend, error);
628 }
629 if (error)
630 goto End;
337 } 631 }
338 632
339 if (!error && dev->bus && dev->bus->suspend) { 633 if (dev->bus) {
340 suspend_device_dbg(dev, state, ""); 634 if (dev->bus->pm) {
341 error = dev->bus->suspend(dev, state); 635 pm_dev_dbg(dev, state, "");
342 suspend_report_result(dev->bus->suspend, error); 636 error = pm_op(dev, &dev->bus->pm->base, state);
637 } else if (dev->bus->suspend) {
638 pm_dev_dbg(dev, state, "legacy ");
639 error = dev->bus->suspend(dev, state);
640 suspend_report_result(dev->bus->suspend, error);
641 }
343 } 642 }
344 643 End:
345 up(&dev->sem); 644 up(&dev->sem);
346 645
347 return error; 646 return error;
@@ -349,67 +648,139 @@ static int suspend_device(struct device *dev, pm_message_t state)
349 648
350/** 649/**
351 * dpm_suspend - Suspend every device. 650 * dpm_suspend - Suspend every device.
352 * @state: Power state to put each device in. 651 * @state: PM transition of the system being carried out.
353 *
354 * Walk the dpm_locked list. Suspend each device and move it
355 * to the dpm_off list.
356 * 652 *
357 * (For historical reasons, if it returns -EAGAIN, that used to mean 653 * Execute the appropriate "suspend" callbacks for all devices.
358 * that the device would be called again with interrupts disabled.
359 * These days, we use the "suspend_late()" callback for that, so we
360 * print a warning and consider it an error).
361 */ 654 */
362static int dpm_suspend(pm_message_t state) 655static int dpm_suspend(pm_message_t state)
363{ 656{
657 struct list_head list;
364 int error = 0; 658 int error = 0;
365 659
660 INIT_LIST_HEAD(&list);
366 mutex_lock(&dpm_list_mtx); 661 mutex_lock(&dpm_list_mtx);
367 while (!list_empty(&dpm_active)) { 662 while (!list_empty(&dpm_list)) {
368 struct list_head *entry = dpm_active.prev; 663 struct device *dev = to_device(dpm_list.prev);
369 struct device *dev = to_device(entry);
370 664
371 WARN_ON(dev->parent && dev->parent->power.sleeping); 665 get_device(dev);
372
373 dev->power.sleeping = true;
374 mutex_unlock(&dpm_list_mtx); 666 mutex_unlock(&dpm_list_mtx);
667
375 error = suspend_device(dev, state); 668 error = suspend_device(dev, state);
669
376 mutex_lock(&dpm_list_mtx); 670 mutex_lock(&dpm_list_mtx);
377 if (error) { 671 if (error) {
378 printk(KERN_ERR "Could not suspend device %s: " 672 pm_dev_err(dev, state, "", error);
379 "error %d%s\n", 673 put_device(dev);
380 kobject_name(&dev->kobj),
381 error,
382 (error == -EAGAIN ?
383 " (please convert to suspend_late)" :
384 ""));
385 dev->power.sleeping = false;
386 break; 674 break;
387 } 675 }
676 dev->power.status = DPM_OFF;
388 if (!list_empty(&dev->power.entry)) 677 if (!list_empty(&dev->power.entry))
389 list_move(&dev->power.entry, &dpm_off); 678 list_move(&dev->power.entry, &list);
679 put_device(dev);
390 } 680 }
391 if (!error) 681 list_splice(&list, dpm_list.prev);
392 all_sleeping = true;
393 mutex_unlock(&dpm_list_mtx); 682 mutex_unlock(&dpm_list_mtx);
683 return error;
684}
685
686/**
687 * prepare_device - Execute the ->prepare() callback(s) for given device.
688 * @dev: Device.
689 * @state: PM transition of the system being carried out.
690 */
691static int prepare_device(struct device *dev, pm_message_t state)
692{
693 int error = 0;
694
695 down(&dev->sem);
696
697 if (dev->bus && dev->bus->pm && dev->bus->pm->base.prepare) {
698 pm_dev_dbg(dev, state, "preparing ");
699 error = dev->bus->pm->base.prepare(dev);
700 suspend_report_result(dev->bus->pm->base.prepare, error);
701 if (error)
702 goto End;
703 }
704
705 if (dev->type && dev->type->pm && dev->type->pm->prepare) {
706 pm_dev_dbg(dev, state, "preparing type ");
707 error = dev->type->pm->prepare(dev);
708 suspend_report_result(dev->type->pm->prepare, error);
709 if (error)
710 goto End;
711 }
712
713 if (dev->class && dev->class->pm && dev->class->pm->prepare) {
714 pm_dev_dbg(dev, state, "preparing class ");
715 error = dev->class->pm->prepare(dev);
716 suspend_report_result(dev->class->pm->prepare, error);
717 }
718 End:
719 up(&dev->sem);
720
721 return error;
722}
723
724/**
725 * dpm_prepare - Prepare all devices for a PM transition.
726 * @state: PM transition of the system being carried out.
727 *
728 * Execute the ->prepare() callback for all devices.
729 */
730static int dpm_prepare(pm_message_t state)
731{
732 struct list_head list;
733 int error = 0;
734
735 INIT_LIST_HEAD(&list);
736 mutex_lock(&dpm_list_mtx);
737 transition_started = true;
738 while (!list_empty(&dpm_list)) {
739 struct device *dev = to_device(dpm_list.next);
740
741 get_device(dev);
742 dev->power.status = DPM_PREPARING;
743 mutex_unlock(&dpm_list_mtx);
394 744
745 error = prepare_device(dev, state);
746
747 mutex_lock(&dpm_list_mtx);
748 if (error) {
749 dev->power.status = DPM_ON;
750 if (error == -EAGAIN) {
751 put_device(dev);
752 continue;
753 }
754 printk(KERN_ERR "PM: Failed to prepare device %s "
755 "for power transition: error %d\n",
756 kobject_name(&dev->kobj), error);
757 put_device(dev);
758 break;
759 }
760 dev->power.status = DPM_SUSPENDING;
761 if (!list_empty(&dev->power.entry))
762 list_move_tail(&dev->power.entry, &list);
763 put_device(dev);
764 }
765 list_splice(&list, &dpm_list);
766 mutex_unlock(&dpm_list_mtx);
395 return error; 767 return error;
396} 768}
397 769
398/** 770/**
399 * device_suspend - Save state and stop all devices in system. 771 * device_suspend - Save state and stop all devices in system.
400 * @state: new power management state 772 * @state: PM transition of the system being carried out.
401 * 773 *
402 * Prevent new devices from being registered, then lock all devices 774 * Prepare and suspend all devices.
403 * and suspend them.
404 */ 775 */
405int device_suspend(pm_message_t state) 776int device_suspend(pm_message_t state)
406{ 777{
407 int error; 778 int error;
408 779
409 might_sleep(); 780 might_sleep();
410 error = dpm_suspend(state); 781 error = dpm_prepare(state);
411 if (error) 782 if (!error)
412 device_resume(); 783 error = dpm_suspend(state);
413 return error; 784 return error;
414} 785}
415EXPORT_SYMBOL_GPL(device_suspend); 786EXPORT_SYMBOL_GPL(device_suspend);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index a6894f2a4b99..a3252c0e2887 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@
4 * main.c 4 * main.c
5 */ 5 */
6 6
7extern struct list_head dpm_active; /* The active device list */ 7extern struct list_head dpm_list; /* The active device list */
8 8
9static inline struct device *to_device(struct list_head *entry) 9static inline struct device *to_device(struct list_head *entry)
10{ 10{
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index d11f74b038db..596aeecfdffe 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,9 +6,6 @@
6#include <linux/string.h> 6#include <linux/string.h>
7#include "power.h" 7#include "power.h"
8 8
9int (*platform_enable_wakeup)(struct device *dev, int is_on);
10
11
12/* 9/*
13 * wakeup - Report/change current wakeup option for device 10 * wakeup - Report/change current wakeup option for device
14 * 11 *
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 87a7f1d02578..9b1b20b59e0a 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -188,9 +188,9 @@ static int show_file_hash(unsigned int value)
188static int show_dev_hash(unsigned int value) 188static int show_dev_hash(unsigned int value)
189{ 189{
190 int match = 0; 190 int match = 0;
191 struct list_head * entry = dpm_active.prev; 191 struct list_head *entry = dpm_list.prev;
192 192
193 while (entry != &dpm_active) { 193 while (entry != &dpm_list) {
194 struct device * dev = to_device(entry); 194 struct device * dev = to_device(entry);
195 unsigned int hash = hash_string(DEVSEED, dev->bus_id, DEVHASH); 195 unsigned int hash = hash_string(DEVSEED, dev->bus_id, DEVHASH);
196 if (hash == value) { 196 if (hash == value) {
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 0d1d2133d9bc..61ad8d639ba3 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -433,4 +433,16 @@ config VIRTIO_BLK
433 This is the virtual block driver for virtio. It can be used with 433 This is the virtual block driver for virtio. It can be used with
434 lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. 434 lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
435 435
436config BLK_DEV_HD
437 bool "Very old hard disk (MFM/RLL/IDE) driver"
438 depends on HAVE_IDE
439 depends on !ARM || ARCH_RPC || ARCH_SHARK || BROKEN
440 help
441 This is a very old hard disk driver that lacks the enhanced
442 functionality of the newer ones.
443
444 It is required for systems with ancient MFM/RLL/ESDI drives.
445
446 If unsure, say N.
447
436endif # BLK_DEV 448endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 5e584306be99..204332b29578 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -29,5 +29,6 @@ obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
29obj-$(CONFIG_VIODASD) += viodasd.o 29obj-$(CONFIG_VIODASD) += viodasd.o
30obj-$(CONFIG_BLK_DEV_SX8) += sx8.o 30obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
31obj-$(CONFIG_BLK_DEV_UB) += ub.o 31obj-$(CONFIG_BLK_DEV_UB) += ub.o
32obj-$(CONFIG_BLK_DEV_HD) += hd.o
32 33
33obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o 34obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
diff --git a/drivers/ide/legacy/hd.c b/drivers/block/hd.c
index abdedf56643e..682243bf2e46 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/block/hd.c
@@ -37,7 +37,6 @@
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/string.h> 38#include <linux/string.h>
39#include <linux/ioport.h> 39#include <linux/ioport.h>
40#include <linux/mc146818rtc.h> /* CMOS defines */
41#include <linux/init.h> 40#include <linux/init.h>
42#include <linux/blkpg.h> 41#include <linux/blkpg.h>
43#include <linux/hdreg.h> 42#include <linux/hdreg.h>
@@ -812,4 +811,4 @@ static int __init parse_hd_setup(char *line)
812} 811}
813__setup("hd=", parse_hd_setup); 812__setup("hd=", parse_hd_setup);
814 813
815module_init(hd_init); 814late_initcall(hd_init);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 2d854bb9373e..650e6b44ce65 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -649,6 +649,14 @@ config HVCS
649 which will also be compiled when this driver is built as a 649 which will also be compiled when this driver is built as a
650 module. 650 module.
651 651
652config IBM_BSR
653 tristate "IBM POWER Barrier Synchronization Register support"
654 depends on PPC_PSERIES
655 help
656 This devices exposes a hardware mechanism for fast synchronization
657 of threads across a large system which avoids bouncing a cacheline
658 between several cores on a system
659
652source "drivers/char/ipmi/Kconfig" 660source "drivers/char/ipmi/Kconfig"
653 661
654config DS1620 662config DS1620
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 81630a68475c..0e0d12a06462 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_MMTIMER) += mmtimer.o
57obj-$(CONFIG_VIOCONS) += viocons.o 57obj-$(CONFIG_VIOCONS) += viocons.o
58obj-$(CONFIG_VIOTAPE) += viotape.o 58obj-$(CONFIG_VIOTAPE) += viotape.o
59obj-$(CONFIG_HVCS) += hvcs.o 59obj-$(CONFIG_HVCS) += hvcs.o
60obj-$(CONFIG_IBM_BSR) += bsr.o
60obj-$(CONFIG_SGI_MBCS) += mbcs.o 61obj-$(CONFIG_SGI_MBCS) += mbcs.o
61obj-$(CONFIG_BRIQ_PANEL) += briq_panel.o 62obj-$(CONFIG_BRIQ_PANEL) += briq_panel.o
62obj-$(CONFIG_BFIN_OTP) += bfin-otp.o 63obj-$(CONFIG_BFIN_OTP) += bfin-otp.o
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index da8a1658a273..aaca40283be9 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -59,6 +59,55 @@ struct apm_queue {
59}; 59};
60 60
61/* 61/*
62 * thread states (for threads using a writable /dev/apm_bios fd):
63 *
64 * SUSPEND_NONE: nothing happening
65 * SUSPEND_PENDING: suspend event queued for thread and pending to be read
66 * SUSPEND_READ: suspend event read, pending acknowledgement
67 * SUSPEND_ACKED: acknowledgement received from thread (via ioctl),
68 * waiting for resume
69 * SUSPEND_ACKTO: acknowledgement timeout
70 * SUSPEND_DONE: thread had acked suspend and is now notified of
71 * resume
72 *
73 * SUSPEND_WAIT: this thread invoked suspend and is waiting for resume
74 *
75 * A thread migrates in one of three paths:
76 * NONE -1-> PENDING -2-> READ -3-> ACKED -4-> DONE -5-> NONE
77 * -6-> ACKTO -7-> NONE
78 * NONE -8-> WAIT -9-> NONE
79 *
80 * While in PENDING or READ, the thread is accounted for in the
81 * suspend_acks_pending counter.
82 *
83 * The transitions are invoked as follows:
84 * 1: suspend event is signalled from the core PM code
85 * 2: the suspend event is read from the fd by the userspace thread
86 * 3: userspace thread issues the APM_IOC_SUSPEND ioctl (as ack)
87 * 4: core PM code signals that we have resumed
88 * 5: APM_IOC_SUSPEND ioctl returns
89 *
90 * 6: the notifier invoked from the core PM code timed out waiting
91 * for all relevant threds to enter ACKED state and puts those
92 * that haven't into ACKTO
93 * 7: those threads issue APM_IOC_SUSPEND ioctl too late,
94 * get an error
95 *
96 * 8: userspace thread issues the APM_IOC_SUSPEND ioctl (to suspend),
97 * ioctl code invokes pm_suspend()
98 * 9: pm_suspend() returns indicating resume
99 */
100enum apm_suspend_state {
101 SUSPEND_NONE,
102 SUSPEND_PENDING,
103 SUSPEND_READ,
104 SUSPEND_ACKED,
105 SUSPEND_ACKTO,
106 SUSPEND_WAIT,
107 SUSPEND_DONE,
108};
109
110/*
62 * The per-file APM data 111 * The per-file APM data
63 */ 112 */
64struct apm_user { 113struct apm_user {
@@ -69,13 +118,7 @@ struct apm_user {
69 unsigned int reader: 1; 118 unsigned int reader: 1;
70 119
71 int suspend_result; 120 int suspend_result;
72 unsigned int suspend_state; 121 enum apm_suspend_state suspend_state;
73#define SUSPEND_NONE 0 /* no suspend pending */
74#define SUSPEND_PENDING 1 /* suspend pending read */
75#define SUSPEND_READ 2 /* suspend read, pending ack */
76#define SUSPEND_ACKED 3 /* suspend acked */
77#define SUSPEND_WAIT 4 /* waiting for suspend */
78#define SUSPEND_DONE 5 /* suspend completed */
79 122
80 struct apm_queue queue; 123 struct apm_queue queue;
81}; 124};
@@ -83,7 +126,8 @@ struct apm_user {
83/* 126/*
84 * Local variables 127 * Local variables
85 */ 128 */
86static int suspends_pending; 129static atomic_t suspend_acks_pending = ATOMIC_INIT(0);
130static atomic_t userspace_notification_inhibit = ATOMIC_INIT(0);
87static int apm_disabled; 131static int apm_disabled;
88static struct task_struct *kapmd_tsk; 132static struct task_struct *kapmd_tsk;
89 133
@@ -166,78 +210,6 @@ static void queue_event(apm_event_t event)
166 wake_up_interruptible(&apm_waitqueue); 210 wake_up_interruptible(&apm_waitqueue);
167} 211}
168 212
169/*
170 * queue_suspend_event - queue an APM suspend event.
171 *
172 * Check that we're in a state where we can suspend. If not,
173 * return -EBUSY. Otherwise, queue an event to all "writer"
174 * users. If there are no "writer" users, return '1' to
175 * indicate that we can immediately suspend.
176 */
177static int queue_suspend_event(apm_event_t event, struct apm_user *sender)
178{
179 struct apm_user *as;
180 int ret = 1;
181
182 mutex_lock(&state_lock);
183 down_read(&user_list_lock);
184
185 /*
186 * If a thread is still processing, we can't suspend, so reject
187 * the request.
188 */
189 list_for_each_entry(as, &apm_user_list, list) {
190 if (as != sender && as->reader && as->writer && as->suser &&
191 as->suspend_state != SUSPEND_NONE) {
192 ret = -EBUSY;
193 goto out;
194 }
195 }
196
197 list_for_each_entry(as, &apm_user_list, list) {
198 if (as != sender && as->reader && as->writer && as->suser) {
199 as->suspend_state = SUSPEND_PENDING;
200 suspends_pending++;
201 queue_add_event(&as->queue, event);
202 ret = 0;
203 }
204 }
205 out:
206 up_read(&user_list_lock);
207 mutex_unlock(&state_lock);
208 wake_up_interruptible(&apm_waitqueue);
209 return ret;
210}
211
212static void apm_suspend(void)
213{
214 struct apm_user *as;
215 int err = pm_suspend(PM_SUSPEND_MEM);
216
217 /*
218 * Anyone on the APM queues will think we're still suspended.
219 * Send a message so everyone knows we're now awake again.
220 */
221 queue_event(APM_NORMAL_RESUME);
222
223 /*
224 * Finally, wake up anyone who is sleeping on the suspend.
225 */
226 mutex_lock(&state_lock);
227 down_read(&user_list_lock);
228 list_for_each_entry(as, &apm_user_list, list) {
229 if (as->suspend_state == SUSPEND_WAIT ||
230 as->suspend_state == SUSPEND_ACKED) {
231 as->suspend_result = err;
232 as->suspend_state = SUSPEND_DONE;
233 }
234 }
235 up_read(&user_list_lock);
236 mutex_unlock(&state_lock);
237
238 wake_up(&apm_suspend_waitqueue);
239}
240
241static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos) 213static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
242{ 214{
243 struct apm_user *as = fp->private_data; 215 struct apm_user *as = fp->private_data;
@@ -308,25 +280,22 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
308 280
309 as->suspend_result = -EINTR; 281 as->suspend_result = -EINTR;
310 282
311 if (as->suspend_state == SUSPEND_READ) { 283 switch (as->suspend_state) {
312 int pending; 284 case SUSPEND_READ:
313
314 /* 285 /*
315 * If we read a suspend command from /dev/apm_bios, 286 * If we read a suspend command from /dev/apm_bios,
316 * then the corresponding APM_IOC_SUSPEND ioctl is 287 * then the corresponding APM_IOC_SUSPEND ioctl is
317 * interpreted as an acknowledge. 288 * interpreted as an acknowledge.
318 */ 289 */
319 as->suspend_state = SUSPEND_ACKED; 290 as->suspend_state = SUSPEND_ACKED;
320 suspends_pending--; 291 atomic_dec(&suspend_acks_pending);
321 pending = suspends_pending == 0;
322 mutex_unlock(&state_lock); 292 mutex_unlock(&state_lock);
323 293
324 /* 294 /*
325 * If there are no further acknowledges required, 295 * suspend_acks_pending changed, the notifier needs to
326 * suspend the system. 296 * be woken up for this
327 */ 297 */
328 if (pending) 298 wake_up(&apm_suspend_waitqueue);
329 apm_suspend();
330 299
331 /* 300 /*
332 * Wait for the suspend/resume to complete. If there 301 * Wait for the suspend/resume to complete. If there
@@ -342,35 +311,21 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
342 * try_to_freeze() in freezer_count() will not trigger 311 * try_to_freeze() in freezer_count() will not trigger
343 */ 312 */
344 freezer_count(); 313 freezer_count();
345 } else { 314 break;
315 case SUSPEND_ACKTO:
316 as->suspend_result = -ETIMEDOUT;
317 mutex_unlock(&state_lock);
318 break;
319 default:
346 as->suspend_state = SUSPEND_WAIT; 320 as->suspend_state = SUSPEND_WAIT;
347 mutex_unlock(&state_lock); 321 mutex_unlock(&state_lock);
348 322
349 /* 323 /*
350 * Otherwise it is a request to suspend the system. 324 * Otherwise it is a request to suspend the system.
351 * Queue an event for all readers, and expect an 325 * Just invoke pm_suspend(), we'll handle it from
352 * acknowledge from all writers who haven't already 326 * there via the notifier.
353 * acknowledged.
354 */
355 err = queue_suspend_event(APM_USER_SUSPEND, as);
356 if (err < 0) {
357 /*
358 * Avoid taking the lock here - this
359 * should be fine.
360 */
361 as->suspend_state = SUSPEND_NONE;
362 break;
363 }
364
365 if (err > 0)
366 apm_suspend();
367
368 /*
369 * Wait for the suspend/resume to complete. If there
370 * are pending acknowledges, we wait here for them.
371 */ 327 */
372 wait_event_freezable(apm_suspend_waitqueue, 328 as->suspend_result = pm_suspend(PM_SUSPEND_MEM);
373 as->suspend_state == SUSPEND_DONE);
374 } 329 }
375 330
376 mutex_lock(&state_lock); 331 mutex_lock(&state_lock);
@@ -386,7 +341,6 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
386static int apm_release(struct inode * inode, struct file * filp) 341static int apm_release(struct inode * inode, struct file * filp)
387{ 342{
388 struct apm_user *as = filp->private_data; 343 struct apm_user *as = filp->private_data;
389 int pending = 0;
390 344
391 filp->private_data = NULL; 345 filp->private_data = NULL;
392 346
@@ -396,18 +350,15 @@ static int apm_release(struct inode * inode, struct file * filp)
396 350
397 /* 351 /*
398 * We are now unhooked from the chain. As far as new 352 * We are now unhooked from the chain. As far as new
399 * events are concerned, we no longer exist. However, we 353 * events are concerned, we no longer exist.
400 * need to balance suspends_pending, which means the
401 * possibility of sleeping.
402 */ 354 */
403 mutex_lock(&state_lock); 355 mutex_lock(&state_lock);
404 if (as->suspend_state != SUSPEND_NONE) { 356 if (as->suspend_state == SUSPEND_PENDING ||
405 suspends_pending -= 1; 357 as->suspend_state == SUSPEND_READ)
406 pending = suspends_pending == 0; 358 atomic_dec(&suspend_acks_pending);
407 }
408 mutex_unlock(&state_lock); 359 mutex_unlock(&state_lock);
409 if (pending) 360
410 apm_suspend(); 361 wake_up(&apm_suspend_waitqueue);
411 362
412 kfree(as); 363 kfree(as);
413 return 0; 364 return 0;
@@ -545,7 +496,6 @@ static int kapmd(void *arg)
545{ 496{
546 do { 497 do {
547 apm_event_t event; 498 apm_event_t event;
548 int ret;
549 499
550 wait_event_interruptible(kapmd_wait, 500 wait_event_interruptible(kapmd_wait,
551 !queue_empty(&kapmd_queue) || kthread_should_stop()); 501 !queue_empty(&kapmd_queue) || kthread_should_stop());
@@ -570,20 +520,13 @@ static int kapmd(void *arg)
570 520
571 case APM_USER_SUSPEND: 521 case APM_USER_SUSPEND:
572 case APM_SYS_SUSPEND: 522 case APM_SYS_SUSPEND:
573 ret = queue_suspend_event(event, NULL); 523 pm_suspend(PM_SUSPEND_MEM);
574 if (ret < 0) {
575 /*
576 * We were busy. Try again in 50ms.
577 */
578 queue_add_event(&kapmd_queue, event);
579 msleep(50);
580 }
581 if (ret > 0)
582 apm_suspend();
583 break; 524 break;
584 525
585 case APM_CRITICAL_SUSPEND: 526 case APM_CRITICAL_SUSPEND:
586 apm_suspend(); 527 atomic_inc(&userspace_notification_inhibit);
528 pm_suspend(PM_SUSPEND_MEM);
529 atomic_dec(&userspace_notification_inhibit);
587 break; 530 break;
588 } 531 }
589 } while (1); 532 } while (1);
@@ -591,6 +534,120 @@ static int kapmd(void *arg)
591 return 0; 534 return 0;
592} 535}
593 536
537static int apm_suspend_notifier(struct notifier_block *nb,
538 unsigned long event,
539 void *dummy)
540{
541 struct apm_user *as;
542 int err;
543
544 /* short-cut emergency suspends */
545 if (atomic_read(&userspace_notification_inhibit))
546 return NOTIFY_DONE;
547
548 switch (event) {
549 case PM_SUSPEND_PREPARE:
550 /*
551 * Queue an event to all "writer" users that we want
552 * to suspend and need their ack.
553 */
554 mutex_lock(&state_lock);
555 down_read(&user_list_lock);
556
557 list_for_each_entry(as, &apm_user_list, list) {
558 if (as->suspend_state != SUSPEND_WAIT && as->reader &&
559 as->writer && as->suser) {
560 as->suspend_state = SUSPEND_PENDING;
561 atomic_inc(&suspend_acks_pending);
562 queue_add_event(&as->queue, APM_USER_SUSPEND);
563 }
564 }
565
566 up_read(&user_list_lock);
567 mutex_unlock(&state_lock);
568 wake_up_interruptible(&apm_waitqueue);
569
570 /*
571 * Wait for the the suspend_acks_pending variable to drop to
572 * zero, meaning everybody acked the suspend event (or the
573 * process was killed.)
574 *
575 * If the app won't answer within a short while we assume it
576 * locked up and ignore it.
577 */
578 err = wait_event_interruptible_timeout(
579 apm_suspend_waitqueue,
580 atomic_read(&suspend_acks_pending) == 0,
581 5*HZ);
582
583 /* timed out */
584 if (err == 0) {
585 /*
586 * Move anybody who timed out to "ack timeout" state.
587 *
588 * We could time out and the userspace does the ACK
589 * right after we time out but before we enter the
590 * locked section here, but that's fine.
591 */
592 mutex_lock(&state_lock);
593 down_read(&user_list_lock);
594 list_for_each_entry(as, &apm_user_list, list) {
595 if (as->suspend_state == SUSPEND_PENDING ||
596 as->suspend_state == SUSPEND_READ) {
597 as->suspend_state = SUSPEND_ACKTO;
598 atomic_dec(&suspend_acks_pending);
599 }
600 }
601 up_read(&user_list_lock);
602 mutex_unlock(&state_lock);
603 }
604
605 /* let suspend proceed */
606 if (err >= 0)
607 return NOTIFY_OK;
608
609 /* interrupted by signal */
610 return NOTIFY_BAD;
611
612 case PM_POST_SUSPEND:
613 /*
614 * Anyone on the APM queues will think we're still suspended.
615 * Send a message so everyone knows we're now awake again.
616 */
617 queue_event(APM_NORMAL_RESUME);
618
619 /*
620 * Finally, wake up anyone who is sleeping on the suspend.
621 */
622 mutex_lock(&state_lock);
623 down_read(&user_list_lock);
624 list_for_each_entry(as, &apm_user_list, list) {
625 if (as->suspend_state == SUSPEND_ACKED) {
626 /*
627 * TODO: maybe grab error code, needs core
628 * changes to push the error to the notifier
629 * chain (could use the second parameter if
630 * implemented)
631 */
632 as->suspend_result = 0;
633 as->suspend_state = SUSPEND_DONE;
634 }
635 }
636 up_read(&user_list_lock);
637 mutex_unlock(&state_lock);
638
639 wake_up(&apm_suspend_waitqueue);
640 return NOTIFY_OK;
641
642 default:
643 return NOTIFY_DONE;
644 }
645}
646
647static struct notifier_block apm_notif_block = {
648 .notifier_call = apm_suspend_notifier,
649};
650
594static int __init apm_init(void) 651static int __init apm_init(void)
595{ 652{
596 int ret; 653 int ret;
@@ -604,7 +661,7 @@ static int __init apm_init(void)
604 if (IS_ERR(kapmd_tsk)) { 661 if (IS_ERR(kapmd_tsk)) {
605 ret = PTR_ERR(kapmd_tsk); 662 ret = PTR_ERR(kapmd_tsk);
606 kapmd_tsk = NULL; 663 kapmd_tsk = NULL;
607 return ret; 664 goto out;
608 } 665 }
609 wake_up_process(kapmd_tsk); 666 wake_up_process(kapmd_tsk);
610 667
@@ -613,16 +670,27 @@ static int __init apm_init(void)
613#endif 670#endif
614 671
615 ret = misc_register(&apm_device); 672 ret = misc_register(&apm_device);
616 if (ret != 0) { 673 if (ret)
617 remove_proc_entry("apm", NULL); 674 goto out_stop;
618 kthread_stop(kapmd_tsk);
619 }
620 675
676 ret = register_pm_notifier(&apm_notif_block);
677 if (ret)
678 goto out_unregister;
679
680 return 0;
681
682 out_unregister:
683 misc_deregister(&apm_device);
684 out_stop:
685 remove_proc_entry("apm", NULL);
686 kthread_stop(kapmd_tsk);
687 out:
621 return ret; 688 return ret;
622} 689}
623 690
624static void __exit apm_exit(void) 691static void __exit apm_exit(void)
625{ 692{
693 unregister_pm_notifier(&apm_notif_block);
626 misc_deregister(&apm_device); 694 misc_deregister(&apm_device);
627 remove_proc_entry("apm", NULL); 695 remove_proc_entry("apm", NULL);
628 696
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
new file mode 100644
index 000000000000..b650b4e48e50
--- /dev/null
+++ b/drivers/char/bsr.c
@@ -0,0 +1,312 @@
1/* IBM POWER Barrier Synchronization Register Driver
2 *
3 * Copyright IBM Corporation 2008
4 *
5 * Author: Sonny Rao <sonnyrao@us.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/kernel.h>
23#include <linux/of.h>
24#include <linux/of_device.h>
25#include <linux/of_platform.h>
26#include <linux/module.h>
27#include <linux/cdev.h>
28#include <linux/list.h>
29#include <linux/mm.h>
30#include <asm/io.h>
31
32/*
33 This driver exposes a special register which can be used for fast
34 synchronization across a large SMP machine. The hardware is exposed
35 as an array of bytes where each process will write to one of the bytes to
36 indicate it has finished the current stage and this update is broadcast to
37 all processors without having to bounce a cacheline between them. In
38 POWER5 and POWER6 there is one of these registers per SMP, but it is
39 presented in two forms; first, it is given as a whole and then as a number
40 of smaller registers which alias to parts of the single whole register.
41 This can potentially allow multiple groups of processes to each have their
42 own private synchronization device.
43
44 Note that this hardware *must* be written to using *only* single byte writes.
45 It may be read using 1, 2, 4, or 8 byte loads which must be aligned since
46 this region is treated as cache-inhibited processes should also use a
47 full sync before and after writing to the BSR to ensure all stores and
48 the BSR update have made it to all chips in the system
49*/
50
51/* This is arbitrary number, up to Power6 it's been 17 or fewer */
52#define BSR_MAX_DEVS (32)
53
54struct bsr_dev {
55 u64 bsr_addr; /* Real address */
56 u64 bsr_len; /* length of mem region we can map */
57 unsigned bsr_bytes; /* size of the BSR reg itself */
58 unsigned bsr_stride; /* interval at which BSR repeats in the page */
59 unsigned bsr_type; /* maps to enum below */
60 unsigned bsr_num; /* bsr id number for its type */
61 int bsr_minor;
62
63 dev_t bsr_dev;
64 struct cdev bsr_cdev;
65 struct device *bsr_device;
66 char bsr_name[32];
67
68};
69
70static unsigned num_bsr_devs;
71static struct bsr_dev *bsr_devs;
72static struct class *bsr_class;
73static int bsr_major;
74
75enum {
76 BSR_8 = 0,
77 BSR_16 = 1,
78 BSR_64 = 2,
79 BSR_128 = 3,
80 BSR_UNKNOWN = 4,
81 BSR_MAX = 5,
82};
83
84static unsigned bsr_types[BSR_MAX];
85
86static ssize_t
87bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf)
88{
89 struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
90 return sprintf(buf, "%u\n", bsr_dev->bsr_bytes);
91}
92
93static ssize_t
94bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf)
95{
96 struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
97 return sprintf(buf, "%u\n", bsr_dev->bsr_stride);
98}
99
100static ssize_t
101bsr_len_show(struct device *dev, struct device_attribute *attr, char *buf)
102{
103 struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
104 return sprintf(buf, "%lu\n", bsr_dev->bsr_len);
105}
106
107static struct device_attribute bsr_dev_attrs[] = {
108 __ATTR(bsr_size, S_IRUGO, bsr_size_show, NULL),
109 __ATTR(bsr_stride, S_IRUGO, bsr_stride_show, NULL),
110 __ATTR(bsr_length, S_IRUGO, bsr_len_show, NULL),
111 __ATTR_NULL
112};
113
114static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
115{
116 unsigned long size = vma->vm_end - vma->vm_start;
117 struct bsr_dev *dev = filp->private_data;
118
119 if (size > dev->bsr_len || (size & (PAGE_SIZE-1)))
120 return -EINVAL;
121
122 vma->vm_flags |= (VM_IO | VM_DONTEXPAND);
123 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
124
125 if (io_remap_pfn_range(vma, vma->vm_start, dev->bsr_addr >> PAGE_SHIFT,
126 size, vma->vm_page_prot))
127 return -EAGAIN;
128
129 return 0;
130}
131
132static int bsr_open(struct inode * inode, struct file * filp)
133{
134 struct cdev *cdev = inode->i_cdev;
135 struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev);
136
137 filp->private_data = dev;
138 return 0;
139}
140
141const static struct file_operations bsr_fops = {
142 .owner = THIS_MODULE,
143 .mmap = bsr_mmap,
144 .open = bsr_open,
145};
146
147static void bsr_cleanup_devs(void)
148{
149 int i;
150 for (i=0 ; i < num_bsr_devs; i++) {
151 struct bsr_dev *cur = bsr_devs + i;
152 if (cur->bsr_device) {
153 cdev_del(&cur->bsr_cdev);
154 device_del(cur->bsr_device);
155 }
156 }
157
158 kfree(bsr_devs);
159}
160
161static int bsr_create_devs(struct device_node *bn)
162{
163 int bsr_stride_len, bsr_bytes_len;
164 const u32 *bsr_stride;
165 const u32 *bsr_bytes;
166 unsigned i;
167
168 bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len);
169 bsr_bytes = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len);
170
171 if (!bsr_stride || !bsr_bytes ||
172 (bsr_stride_len != bsr_bytes_len)) {
173 printk(KERN_ERR "bsr of-node has missing/incorrect property\n");
174 return -ENODEV;
175 }
176
177 num_bsr_devs = bsr_bytes_len / sizeof(u32);
178
179 /* only a warning, its informational since we'll fail and exit */
180 WARN_ON(num_bsr_devs > BSR_MAX_DEVS);
181
182 bsr_devs = kzalloc(sizeof(struct bsr_dev) * num_bsr_devs, GFP_KERNEL);
183 if (!bsr_devs)
184 return -ENOMEM;
185
186 for (i = 0 ; i < num_bsr_devs; i++) {
187 struct bsr_dev *cur = bsr_devs + i;
188 struct resource res;
189 int result;
190
191 result = of_address_to_resource(bn, i, &res);
192 if (result < 0) {
193 printk(KERN_ERR "bsr of-node has invalid reg property\n");
194 goto out_err;
195 }
196
197 cur->bsr_minor = i;
198 cur->bsr_addr = res.start;
199 cur->bsr_len = res.end - res.start + 1;
200 cur->bsr_bytes = bsr_bytes[i];
201 cur->bsr_stride = bsr_stride[i];
202 cur->bsr_dev = MKDEV(bsr_major, i);
203
204 switch(cur->bsr_bytes) {
205 case 8:
206 cur->bsr_type = BSR_8;
207 break;
208 case 16:
209 cur->bsr_type = BSR_16;
210 break;
211 case 64:
212 cur->bsr_type = BSR_64;
213 break;
214 case 128:
215 cur->bsr_type = BSR_128;
216 break;
217 default:
218 cur->bsr_type = BSR_UNKNOWN;
219 printk(KERN_INFO "unknown BSR size %d\n",cur->bsr_bytes);
220 }
221
222 cur->bsr_num = bsr_types[cur->bsr_type];
223 bsr_types[cur->bsr_type] = cur->bsr_num + 1;
224 snprintf(cur->bsr_name, 32, "bsr%d_%d",
225 cur->bsr_bytes, cur->bsr_num);
226
227 cdev_init(&cur->bsr_cdev, &bsr_fops);
228 result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1);
229 if (result)
230 goto out_err;
231
232 cur->bsr_device = device_create_drvdata(bsr_class, NULL,
233 cur->bsr_dev,
234 cur, cur->bsr_name);
235 if (!cur->bsr_device) {
236 printk(KERN_ERR "device_create failed for %s\n",
237 cur->bsr_name);
238 cdev_del(&cur->bsr_cdev);
239 goto out_err;
240 }
241 }
242
243 return 0;
244
245 out_err:
246
247 bsr_cleanup_devs();
248 return -ENODEV;
249}
250
251static int __init bsr_init(void)
252{
253 struct device_node *np;
254 dev_t bsr_dev = MKDEV(bsr_major, 0);
255 int ret = -ENODEV;
256 int result;
257
258 np = of_find_compatible_node(NULL, "ibm,bsr", "ibm,bsr");
259 if (!np)
260 goto out_err;
261
262 bsr_class = class_create(THIS_MODULE, "bsr");
263 if (IS_ERR(bsr_class)) {
264 printk(KERN_ERR "class_create() failed for bsr_class\n");
265 goto out_err_1;
266 }
267 bsr_class->dev_attrs = bsr_dev_attrs;
268
269 result = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
270 bsr_major = MAJOR(bsr_dev);
271 if (result < 0) {
272 printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n");
273 goto out_err_2;
274 }
275
276 if ((ret = bsr_create_devs(np)) < 0)
277 goto out_err_3;
278
279 of_node_put(np);
280
281 return 0;
282
283 out_err_3:
284 unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS);
285
286 out_err_2:
287 class_destroy(bsr_class);
288
289 out_err_1:
290 of_node_put(np);
291
292 out_err:
293
294 return ret;
295}
296
297static void __exit bsr_exit(void)
298{
299
300 bsr_cleanup_devs();
301
302 if (bsr_class)
303 class_destroy(bsr_class);
304
305 if (bsr_major)
306 unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS);
307}
308
309module_init(bsr_init);
310module_exit(bsr_exit);
311MODULE_LICENSE("GPL");
312MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>");
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 44160d5ebca0..2f9759d625cc 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -675,12 +675,6 @@ static int hvc_poll(struct hvc_struct *hp)
675 return poll_mask; 675 return poll_mask;
676} 676}
677 677
678#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
679extern cpumask_t cpus_in_xmon;
680#else
681static const cpumask_t cpus_in_xmon = CPU_MASK_NONE;
682#endif
683
684/* 678/*
685 * This kthread is either polling or interrupt driven. This is determined by 679 * This kthread is either polling or interrupt driven. This is determined by
686 * calling hvc_poll() who determines whether a console adapter support 680 * calling hvc_poll() who determines whether a console adapter support
@@ -698,7 +692,7 @@ static int khvcd(void *unused)
698 hvc_kicked = 0; 692 hvc_kicked = 0;
699 try_to_freeze(); 693 try_to_freeze();
700 wmb(); 694 wmb();
701 if (cpus_empty(cpus_in_xmon)) { 695 if (!cpus_are_in_xmon()) {
702 spin_lock(&hvc_structs_lock); 696 spin_lock(&hvc_structs_lock);
703 list_for_each_entry(hp, &hvc_structs, next) { 697 list_for_each_entry(hp, &hvc_structs, next) {
704 poll_mask |= hvc_poll(hp); 698 poll_mask |= hvc_poll(hp);
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 8c59818050e6..42ffb17e15df 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -60,4 +60,14 @@ extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int irq,
60/* remove a vterm from hvc tty operation (modele_exit or hotplug remove) */ 60/* remove a vterm from hvc tty operation (modele_exit or hotplug remove) */
61extern int __devexit hvc_remove(struct hvc_struct *hp); 61extern int __devexit hvc_remove(struct hvc_struct *hp);
62 62
63
64#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
65#include <asm/xmon.h>
66#else
67static inline int cpus_are_in_xmon(void)
68{
69 return 0;
70}
71#endif
72
63#endif // HVC_CONSOLE_H 73#endif // HVC_CONSOLE_H
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 6d50e9bc700b..7fa61dd1d9d9 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -24,7 +24,7 @@
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/hw_random.h> 25#include <linux/hw_random.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <asm/of_platform.h> 27#include <linux/of_platform.h>
28#include <asm/io.h> 28#include <asm/io.h>
29 29
30#define SDCRNG_CTL_REG 0x00 30#define SDCRNG_CTL_REG 0x00
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 59ca35156d81..e4a4fbd37d7a 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1439,7 +1439,7 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1439 DEBUGP(4, dev, "CMM_ABSENT flag set\n"); 1439 DEBUGP(4, dev, "CMM_ABSENT flag set\n");
1440 goto out; 1440 goto out;
1441 } 1441 }
1442 rc = EINVAL; 1442 rc = -EINVAL;
1443 1443
1444 if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) { 1444 if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) {
1445 DEBUGP(4, dev, "ioctype mismatch\n"); 1445 DEBUGP(4, dev, "ioctype mismatch\n");
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index 977f7d35e769..e5da98d8f9cd 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -678,6 +678,17 @@ free_op:
678 return ret; 678 return ret;
679} 679}
680 680
681static long viotap_unlocked_ioctl(struct file *file,
682 unsigned int cmd, unsigned long arg)
683{
684 long rc;
685
686 lock_kernel();
687 rc = viotap_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
688 unlock_kernel();
689 return rc;
690}
691
681static int viotap_open(struct inode *inode, struct file *file) 692static int viotap_open(struct inode *inode, struct file *file)
682{ 693{
683 HvLpEvent_Rc hvrc; 694 HvLpEvent_Rc hvrc;
@@ -786,12 +797,12 @@ free_op:
786} 797}
787 798
788const struct file_operations viotap_fops = { 799const struct file_operations viotap_fops = {
789 .owner = THIS_MODULE, 800 .owner = THIS_MODULE,
790 .read = viotap_read, 801 .read = viotap_read,
791 .write = viotap_write, 802 .write = viotap_write,
792 .ioctl = viotap_ioctl, 803 .unlocked_ioctl = viotap_unlocked_ioctl,
793 .open = viotap_open, 804 .open = viotap_open,
794 .release = viotap_release, 805 .release = viotap_release,
795}; 806};
796 807
797/* Handle interrupt events for tape */ 808/* Handle interrupt events for tape */
diff --git a/drivers/hwmon/ad7418.c b/drivers/hwmon/ad7418.c
index 466b9ee92797..f97b5b356875 100644
--- a/drivers/hwmon/ad7418.c
+++ b/drivers/hwmon/ad7418.c
@@ -23,12 +23,9 @@
23 23
24#include "lm75.h" 24#include "lm75.h"
25 25
26#define DRV_VERSION "0.3" 26#define DRV_VERSION "0.4"
27 27
28/* Addresses to scan */ 28enum chips { ad7416, ad7417, ad7418 };
29static const unsigned short normal_i2c[] = { 0x28, I2C_CLIENT_END };
30/* Insmod parameters */
31I2C_CLIENT_INSMOD_3(ad7416, ad7417, ad7418);
32 29
33/* AD7418 registers */ 30/* AD7418 registers */
34#define AD7418_REG_TEMP_IN 0x00 31#define AD7418_REG_TEMP_IN 0x00
@@ -46,7 +43,6 @@ static const u8 AD7418_REG_TEMP[] = { AD7418_REG_TEMP_IN,
46 AD7418_REG_TEMP_OS }; 43 AD7418_REG_TEMP_OS };
47 44
48struct ad7418_data { 45struct ad7418_data {
49 struct i2c_client client;
50 struct device *hwmon_dev; 46 struct device *hwmon_dev;
51 struct attribute_group attrs; 47 struct attribute_group attrs;
52 enum chips type; 48 enum chips type;
@@ -58,16 +54,25 @@ struct ad7418_data {
58 u16 in[4]; 54 u16 in[4];
59}; 55};
60 56
61static int ad7418_attach_adapter(struct i2c_adapter *adapter); 57static int ad7418_probe(struct i2c_client *client,
62static int ad7418_detect(struct i2c_adapter *adapter, int address, int kind); 58 const struct i2c_device_id *id);
63static int ad7418_detach_client(struct i2c_client *client); 59static int ad7418_remove(struct i2c_client *client);
60
61static const struct i2c_device_id ad7418_id[] = {
62 { "ad7416", ad7416 },
63 { "ad7417", ad7417 },
64 { "ad7418", ad7418 },
65 { }
66};
67MODULE_DEVICE_TABLE(i2c, ad7418_id);
64 68
65static struct i2c_driver ad7418_driver = { 69static struct i2c_driver ad7418_driver = {
66 .driver = { 70 .driver = {
67 .name = "ad7418", 71 .name = "ad7418",
68 }, 72 },
69 .attach_adapter = ad7418_attach_adapter, 73 .probe = ad7418_probe,
70 .detach_client = ad7418_detach_client, 74 .remove = ad7418_remove,
75 .id_table = ad7418_id,
71}; 76};
72 77
73/* All registers are word-sized, except for the configuration registers. 78/* All registers are word-sized, except for the configuration registers.
@@ -192,13 +197,6 @@ static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_adc, NULL, 1);
192static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_adc, NULL, 2); 197static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_adc, NULL, 2);
193static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_adc, NULL, 3); 198static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_adc, NULL, 3);
194 199
195static int ad7418_attach_adapter(struct i2c_adapter *adapter)
196{
197 if (!(adapter->class & I2C_CLASS_HWMON))
198 return 0;
199 return i2c_probe(adapter, &addr_data, ad7418_detect);
200}
201
202static struct attribute *ad7416_attributes[] = { 200static struct attribute *ad7416_attributes[] = {
203 &sensor_dev_attr_temp1_max.dev_attr.attr, 201 &sensor_dev_attr_temp1_max.dev_attr.attr,
204 &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, 202 &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
@@ -225,98 +223,46 @@ static struct attribute *ad7418_attributes[] = {
225 NULL 223 NULL
226}; 224};
227 225
228static int ad7418_detect(struct i2c_adapter *adapter, int address, int kind) 226static int ad7418_probe(struct i2c_client *client,
227 const struct i2c_device_id *id)
229{ 228{
230 struct i2c_client *client; 229 struct i2c_adapter *adapter = client->adapter;
231 struct ad7418_data *data; 230 struct ad7418_data *data;
232 int err = 0; 231 int err;
233 232
234 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | 233 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
235 I2C_FUNC_SMBUS_WORD_DATA)) 234 I2C_FUNC_SMBUS_WORD_DATA)) {
235 err = -EOPNOTSUPP;
236 goto exit; 236 goto exit;
237 }
237 238
238 if (!(data = kzalloc(sizeof(struct ad7418_data), GFP_KERNEL))) { 239 if (!(data = kzalloc(sizeof(struct ad7418_data), GFP_KERNEL))) {
239 err = -ENOMEM; 240 err = -ENOMEM;
240 goto exit; 241 goto exit;
241 } 242 }
242 243
243 client = &data->client;
244 client->addr = address;
245 client->adapter = adapter;
246 client->driver = &ad7418_driver;
247
248 i2c_set_clientdata(client, data); 244 i2c_set_clientdata(client, data);
249 245
250 mutex_init(&data->lock); 246 mutex_init(&data->lock);
251 247 data->type = id->driver_data;
252 /* AD7418 has a curious behaviour on registers 6 and 7. They
253 * both always read 0xC071 and are not documented on the datasheet.
254 * We use them to detect the chip.
255 */
256 if (kind <= 0) {
257 int reg, reg6, reg7;
258
259 /* the AD7416 lies within this address range, but I have
260 * no means to check.
261 */
262 if (address >= 0x48 && address <= 0x4f) {
263 /* XXX add tests for AD7416 here */
264 /* data->type = ad7416; */
265 }
266 /* here we might have AD7417 or AD7418 */
267 else if (address >= 0x28 && address <= 0x2f) {
268 reg6 = i2c_smbus_read_word_data(client, 0x06);
269 reg7 = i2c_smbus_read_word_data(client, 0x07);
270
271 if (address == 0x28 && reg6 == 0xC071 && reg7 == 0xC071)
272 data->type = ad7418;
273
274 /* XXX add tests for AD7417 here */
275
276
277 /* both AD7417 and AD7418 have bits 0-5 of
278 * the CONF2 register at 0
279 */
280 reg = i2c_smbus_read_byte_data(client,
281 AD7418_REG_CONF2);
282 if (reg & 0x3F)
283 data->type = any_chip; /* detection failed */
284 }
285 } else {
286 dev_dbg(&adapter->dev, "detection forced\n");
287 }
288
289 if (kind > 0)
290 data->type = kind;
291 else if (kind < 0 && data->type == any_chip) {
292 err = -ENODEV;
293 goto exit_free;
294 }
295 248
296 switch (data->type) { 249 switch (data->type) {
297 case any_chip:
298 case ad7416: 250 case ad7416:
299 data->adc_max = 0; 251 data->adc_max = 0;
300 data->attrs.attrs = ad7416_attributes; 252 data->attrs.attrs = ad7416_attributes;
301 strlcpy(client->name, "ad7416", I2C_NAME_SIZE);
302 break; 253 break;
303 254
304 case ad7417: 255 case ad7417:
305 data->adc_max = 4; 256 data->adc_max = 4;
306 data->attrs.attrs = ad7417_attributes; 257 data->attrs.attrs = ad7417_attributes;
307 strlcpy(client->name, "ad7417", I2C_NAME_SIZE);
308 break; 258 break;
309 259
310 case ad7418: 260 case ad7418:
311 data->adc_max = 1; 261 data->adc_max = 1;
312 data->attrs.attrs = ad7418_attributes; 262 data->attrs.attrs = ad7418_attributes;
313 strlcpy(client->name, "ad7418", I2C_NAME_SIZE);
314 break; 263 break;
315 } 264 }
316 265
317 if ((err = i2c_attach_client(client)))
318 goto exit_free;
319
320 dev_info(&client->dev, "%s chip found\n", client->name); 266 dev_info(&client->dev, "%s chip found\n", client->name);
321 267
322 /* Initialize the AD7418 chip */ 268 /* Initialize the AD7418 chip */
@@ -324,7 +270,7 @@ static int ad7418_detect(struct i2c_adapter *adapter, int address, int kind)
324 270
325 /* Register sysfs hooks */ 271 /* Register sysfs hooks */
326 if ((err = sysfs_create_group(&client->dev.kobj, &data->attrs))) 272 if ((err = sysfs_create_group(&client->dev.kobj, &data->attrs)))
327 goto exit_detach; 273 goto exit_free;
328 274
329 data->hwmon_dev = hwmon_device_register(&client->dev); 275 data->hwmon_dev = hwmon_device_register(&client->dev);
330 if (IS_ERR(data->hwmon_dev)) { 276 if (IS_ERR(data->hwmon_dev)) {
@@ -336,20 +282,17 @@ static int ad7418_detect(struct i2c_adapter *adapter, int address, int kind)
336 282
337exit_remove: 283exit_remove:
338 sysfs_remove_group(&client->dev.kobj, &data->attrs); 284 sysfs_remove_group(&client->dev.kobj, &data->attrs);
339exit_detach:
340 i2c_detach_client(client);
341exit_free: 285exit_free:
342 kfree(data); 286 kfree(data);
343exit: 287exit:
344 return err; 288 return err;
345} 289}
346 290
347static int ad7418_detach_client(struct i2c_client *client) 291static int ad7418_remove(struct i2c_client *client)
348{ 292{
349 struct ad7418_data *data = i2c_get_clientdata(client); 293 struct ad7418_data *data = i2c_get_clientdata(client);
350 hwmon_device_unregister(data->hwmon_dev); 294 hwmon_device_unregister(data->hwmon_dev);
351 sysfs_remove_group(&client->dev.kobj, &data->attrs); 295 sysfs_remove_group(&client->dev.kobj, &data->attrs);
352 i2c_detach_client(client);
353 kfree(data); 296 kfree(data);
354 return 0; 297 return 0;
355} 298}
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index ecbf69484bf5..b11e06f644b1 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -78,7 +78,6 @@ clearing it. Weird, ey? --Phil */
78 78
79/* Each client has this additional data */ 79/* Each client has this additional data */
80struct adm1021_data { 80struct adm1021_data {
81 struct i2c_client client;
82 struct device *hwmon_dev; 81 struct device *hwmon_dev;
83 enum chips type; 82 enum chips type;
84 83
@@ -98,23 +97,42 @@ struct adm1021_data {
98 u8 remote_temp_offset_prec; 97 u8 remote_temp_offset_prec;
99}; 98};
100 99
101static int adm1021_attach_adapter(struct i2c_adapter *adapter); 100static int adm1021_probe(struct i2c_client *client,
102static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind); 101 const struct i2c_device_id *id);
102static int adm1021_detect(struct i2c_client *client, int kind,
103 struct i2c_board_info *info);
103static void adm1021_init_client(struct i2c_client *client); 104static void adm1021_init_client(struct i2c_client *client);
104static int adm1021_detach_client(struct i2c_client *client); 105static int adm1021_remove(struct i2c_client *client);
105static struct adm1021_data *adm1021_update_device(struct device *dev); 106static struct adm1021_data *adm1021_update_device(struct device *dev);
106 107
107/* (amalysh) read only mode, otherwise any limit's writing confuse BIOS */ 108/* (amalysh) read only mode, otherwise any limit's writing confuse BIOS */
108static int read_only; 109static int read_only;
109 110
110 111
112static const struct i2c_device_id adm1021_id[] = {
113 { "adm1021", adm1021 },
114 { "adm1023", adm1023 },
115 { "max1617", max1617 },
116 { "max1617a", max1617a },
117 { "thmc10", thmc10 },
118 { "lm84", lm84 },
119 { "gl523sm", gl523sm },
120 { "mc1066", mc1066 },
121 { }
122};
123MODULE_DEVICE_TABLE(i2c, adm1021_id);
124
111/* This is the driver that will be inserted */ 125/* This is the driver that will be inserted */
112static struct i2c_driver adm1021_driver = { 126static struct i2c_driver adm1021_driver = {
127 .class = I2C_CLASS_HWMON,
113 .driver = { 128 .driver = {
114 .name = "adm1021", 129 .name = "adm1021",
115 }, 130 },
116 .attach_adapter = adm1021_attach_adapter, 131 .probe = adm1021_probe,
117 .detach_client = adm1021_detach_client, 132 .remove = adm1021_remove,
133 .id_table = adm1021_id,
134 .detect = adm1021_detect,
135 .address_data = &addr_data,
118}; 136};
119 137
120static ssize_t show_temp(struct device *dev, 138static ssize_t show_temp(struct device *dev,
@@ -216,13 +234,6 @@ static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
216 234
217static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); 235static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
218 236
219static int adm1021_attach_adapter(struct i2c_adapter *adapter)
220{
221 if (!(adapter->class & I2C_CLASS_HWMON))
222 return 0;
223 return i2c_probe(adapter, &addr_data, adm1021_detect);
224}
225
226static struct attribute *adm1021_attributes[] = { 237static struct attribute *adm1021_attributes[] = {
227 &sensor_dev_attr_temp1_max.dev_attr.attr, 238 &sensor_dev_attr_temp1_max.dev_attr.attr,
228 &sensor_dev_attr_temp1_min.dev_attr.attr, 239 &sensor_dev_attr_temp1_min.dev_attr.attr,
@@ -243,36 +254,21 @@ static const struct attribute_group adm1021_group = {
243 .attrs = adm1021_attributes, 254 .attrs = adm1021_attributes,
244}; 255};
245 256
246static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind) 257/* Return 0 if detection is successful, -ENODEV otherwise */
258static int adm1021_detect(struct i2c_client *client, int kind,
259 struct i2c_board_info *info)
247{ 260{
261 struct i2c_adapter *adapter = client->adapter;
248 int i; 262 int i;
249 struct i2c_client *client;
250 struct adm1021_data *data;
251 int err = 0;
252 const char *type_name = ""; 263 const char *type_name = "";
253 int conv_rate, status, config; 264 int conv_rate, status, config;
254 265
255 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 266 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
256 pr_debug("adm1021: detect failed, " 267 pr_debug("adm1021: detect failed, "
257 "smbus byte data not supported!\n"); 268 "smbus byte data not supported!\n");
258 goto error0; 269 return -ENODEV;
259 }
260
261 /* OK. For now, we presume we have a valid client. We now create the
262 client structure, even though we cannot fill it completely yet.
263 But it allows us to access adm1021 register values. */
264
265 if (!(data = kzalloc(sizeof(struct adm1021_data), GFP_KERNEL))) {
266 pr_debug("adm1021: detect failed, kzalloc failed!\n");
267 err = -ENOMEM;
268 goto error0;
269 } 270 }
270 271
271 client = &data->client;
272 i2c_set_clientdata(client, data);
273 client->addr = address;
274 client->adapter = adapter;
275 client->driver = &adm1021_driver;
276 status = i2c_smbus_read_byte_data(client, ADM1021_REG_STATUS); 272 status = i2c_smbus_read_byte_data(client, ADM1021_REG_STATUS);
277 conv_rate = i2c_smbus_read_byte_data(client, 273 conv_rate = i2c_smbus_read_byte_data(client,
278 ADM1021_REG_CONV_RATE_R); 274 ADM1021_REG_CONV_RATE_R);
@@ -284,8 +280,7 @@ static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind)
284 || (conv_rate & 0xF8) != 0x00) { 280 || (conv_rate & 0xF8) != 0x00) {
285 pr_debug("adm1021: detect failed, " 281 pr_debug("adm1021: detect failed, "
286 "chip not detected!\n"); 282 "chip not detected!\n");
287 err = -ENODEV; 283 return -ENODEV;
288 goto error1;
289 } 284 }
290 } 285 }
291 286
@@ -336,24 +331,36 @@ static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind)
336 type_name = "mc1066"; 331 type_name = "mc1066";
337 } 332 }
338 pr_debug("adm1021: Detected chip %s at adapter %d, address 0x%02x.\n", 333 pr_debug("adm1021: Detected chip %s at adapter %d, address 0x%02x.\n",
339 type_name, i2c_adapter_id(adapter), address); 334 type_name, i2c_adapter_id(adapter), client->addr);
335 strlcpy(info->type, type_name, I2C_NAME_SIZE);
340 336
341 /* Fill in the remaining client fields */ 337 return 0;
342 strlcpy(client->name, type_name, I2C_NAME_SIZE); 338}
343 data->type = kind;
344 mutex_init(&data->update_lock);
345 339
346 /* Tell the I2C layer a new client has arrived */ 340static int adm1021_probe(struct i2c_client *client,
347 if ((err = i2c_attach_client(client))) 341 const struct i2c_device_id *id)
348 goto error1; 342{
343 struct adm1021_data *data;
344 int err;
345
346 data = kzalloc(sizeof(struct adm1021_data), GFP_KERNEL);
347 if (!data) {
348 pr_debug("adm1021: detect failed, kzalloc failed!\n");
349 err = -ENOMEM;
350 goto error0;
351 }
352
353 i2c_set_clientdata(client, data);
354 data->type = id->driver_data;
355 mutex_init(&data->update_lock);
349 356
350 /* Initialize the ADM1021 chip */ 357 /* Initialize the ADM1021 chip */
351 if (kind != lm84 && !read_only) 358 if (data->type != lm84 && !read_only)
352 adm1021_init_client(client); 359 adm1021_init_client(client);
353 360
354 /* Register sysfs hooks */ 361 /* Register sysfs hooks */
355 if ((err = sysfs_create_group(&client->dev.kobj, &adm1021_group))) 362 if ((err = sysfs_create_group(&client->dev.kobj, &adm1021_group)))
356 goto error2; 363 goto error1;
357 364
358 data->hwmon_dev = hwmon_device_register(&client->dev); 365 data->hwmon_dev = hwmon_device_register(&client->dev);
359 if (IS_ERR(data->hwmon_dev)) { 366 if (IS_ERR(data->hwmon_dev)) {
@@ -365,8 +372,6 @@ static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind)
365 372
366error3: 373error3:
367 sysfs_remove_group(&client->dev.kobj, &adm1021_group); 374 sysfs_remove_group(&client->dev.kobj, &adm1021_group);
368error2:
369 i2c_detach_client(client);
370error1: 375error1:
371 kfree(data); 376 kfree(data);
372error0: 377error0:
@@ -382,17 +387,13 @@ static void adm1021_init_client(struct i2c_client *client)
382 i2c_smbus_write_byte_data(client, ADM1021_REG_CONV_RATE_W, 0x04); 387 i2c_smbus_write_byte_data(client, ADM1021_REG_CONV_RATE_W, 0x04);
383} 388}
384 389
385static int adm1021_detach_client(struct i2c_client *client) 390static int adm1021_remove(struct i2c_client *client)
386{ 391{
387 struct adm1021_data *data = i2c_get_clientdata(client); 392 struct adm1021_data *data = i2c_get_clientdata(client);
388 int err;
389 393
390 hwmon_device_unregister(data->hwmon_dev); 394 hwmon_device_unregister(data->hwmon_dev);
391 sysfs_remove_group(&client->dev.kobj, &adm1021_group); 395 sysfs_remove_group(&client->dev.kobj, &adm1021_group);
392 396
393 if ((err = i2c_detach_client(client)))
394 return err;
395
396 kfree(data); 397 kfree(data);
397 return 0; 398 return 0;
398} 399}
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index 1d76de7d75c7..4db04d603ec9 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -2,7 +2,7 @@
2 * adm1025.c 2 * adm1025.c
3 * 3 *
4 * Copyright (C) 2000 Chen-Yuan Wu <gwu@esoft.com> 4 * Copyright (C) 2000 Chen-Yuan Wu <gwu@esoft.com>
5 * Copyright (C) 2003-2004 Jean Delvare <khali@linux-fr.org> 5 * Copyright (C) 2003-2008 Jean Delvare <khali@linux-fr.org>
6 * 6 *
7 * The ADM1025 is a sensor chip made by Analog Devices. It reports up to 6 7 * The ADM1025 is a sensor chip made by Analog Devices. It reports up to 6
8 * voltages (including its own power source) and up to two temperatures 8 * voltages (including its own power source) and up to two temperatures
@@ -109,22 +109,35 @@ static const int in_scale[6] = { 2500, 2250, 3300, 5000, 12000, 3300 };
109 * Functions declaration 109 * Functions declaration
110 */ 110 */
111 111
112static int adm1025_attach_adapter(struct i2c_adapter *adapter); 112static int adm1025_probe(struct i2c_client *client,
113static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind); 113 const struct i2c_device_id *id);
114static int adm1025_detect(struct i2c_client *client, int kind,
115 struct i2c_board_info *info);
114static void adm1025_init_client(struct i2c_client *client); 116static void adm1025_init_client(struct i2c_client *client);
115static int adm1025_detach_client(struct i2c_client *client); 117static int adm1025_remove(struct i2c_client *client);
116static struct adm1025_data *adm1025_update_device(struct device *dev); 118static struct adm1025_data *adm1025_update_device(struct device *dev);
117 119
118/* 120/*
119 * Driver data (common to all clients) 121 * Driver data (common to all clients)
120 */ 122 */
121 123
124static const struct i2c_device_id adm1025_id[] = {
125 { "adm1025", adm1025 },
126 { "ne1619", ne1619 },
127 { }
128};
129MODULE_DEVICE_TABLE(i2c, adm1025_id);
130
122static struct i2c_driver adm1025_driver = { 131static struct i2c_driver adm1025_driver = {
132 .class = I2C_CLASS_HWMON,
123 .driver = { 133 .driver = {
124 .name = "adm1025", 134 .name = "adm1025",
125 }, 135 },
126 .attach_adapter = adm1025_attach_adapter, 136 .probe = adm1025_probe,
127 .detach_client = adm1025_detach_client, 137 .remove = adm1025_remove,
138 .id_table = adm1025_id,
139 .detect = adm1025_detect,
140 .address_data = &addr_data,
128}; 141};
129 142
130/* 143/*
@@ -132,7 +145,6 @@ static struct i2c_driver adm1025_driver = {
132 */ 145 */
133 146
134struct adm1025_data { 147struct adm1025_data {
135 struct i2c_client client;
136 struct device *hwmon_dev; 148 struct device *hwmon_dev;
137 struct mutex update_lock; 149 struct mutex update_lock;
138 char valid; /* zero until following fields are valid */ 150 char valid; /* zero until following fields are valid */
@@ -344,13 +356,6 @@ static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
344 * Real code 356 * Real code
345 */ 357 */
346 358
347static int adm1025_attach_adapter(struct i2c_adapter *adapter)
348{
349 if (!(adapter->class & I2C_CLASS_HWMON))
350 return 0;
351 return i2c_probe(adapter, &addr_data, adm1025_detect);
352}
353
354static struct attribute *adm1025_attributes[] = { 359static struct attribute *adm1025_attributes[] = {
355 &sensor_dev_attr_in0_input.dev_attr.attr, 360 &sensor_dev_attr_in0_input.dev_attr.attr,
356 &sensor_dev_attr_in1_input.dev_attr.attr, 361 &sensor_dev_attr_in1_input.dev_attr.attr,
@@ -403,31 +408,16 @@ static const struct attribute_group adm1025_group_in4 = {
403 .attrs = adm1025_attributes_in4, 408 .attrs = adm1025_attributes_in4,
404}; 409};
405 410
406/* 411/* Return 0 if detection is successful, -ENODEV otherwise */
407 * The following function does more than just detection. If detection 412static int adm1025_detect(struct i2c_client *client, int kind,
408 * succeeds, it also registers the new chip. 413 struct i2c_board_info *info)
409 */
410static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind)
411{ 414{
412 struct i2c_client *client; 415 struct i2c_adapter *adapter = client->adapter;
413 struct adm1025_data *data;
414 int err = 0;
415 const char *name = ""; 416 const char *name = "";
416 u8 config; 417 u8 config;
417 418
418 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 419 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
419 goto exit; 420 return -ENODEV;
420
421 if (!(data = kzalloc(sizeof(struct adm1025_data), GFP_KERNEL))) {
422 err = -ENOMEM;
423 goto exit;
424 }
425
426 client = &data->client;
427 i2c_set_clientdata(client, data);
428 client->addr = address;
429 client->adapter = adapter;
430 client->driver = &adm1025_driver;
431 421
432 /* 422 /*
433 * Now we do the remaining detection. A negative kind means that 423 * Now we do the remaining detection. A negative kind means that
@@ -448,8 +438,8 @@ static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind)
448 ADM1025_REG_STATUS2) & 0xBC) != 0x00) { 438 ADM1025_REG_STATUS2) & 0xBC) != 0x00) {
449 dev_dbg(&adapter->dev, 439 dev_dbg(&adapter->dev,
450 "ADM1025 detection failed at 0x%02x.\n", 440 "ADM1025 detection failed at 0x%02x.\n",
451 address); 441 client->addr);
452 goto exit_free; 442 return -ENODEV;
453 } 443 }
454 } 444 }
455 445
@@ -465,7 +455,7 @@ static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind)
465 } 455 }
466 } else 456 } else
467 if (man_id == 0xA1) { /* Philips */ 457 if (man_id == 0xA1) { /* Philips */
468 if (address != 0x2E 458 if (client->addr != 0x2E
469 && (chip_id & 0xF0) == 0x20) { /* NE1619 */ 459 && (chip_id & 0xF0) == 0x20) { /* NE1619 */
470 kind = ne1619; 460 kind = ne1619;
471 } 461 }
@@ -475,7 +465,7 @@ static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind)
475 dev_info(&adapter->dev, 465 dev_info(&adapter->dev,
476 "Unsupported chip (man_id=0x%02X, " 466 "Unsupported chip (man_id=0x%02X, "
477 "chip_id=0x%02X).\n", man_id, chip_id); 467 "chip_id=0x%02X).\n", man_id, chip_id);
478 goto exit_free; 468 return -ENODEV;
479 } 469 }
480 } 470 }
481 471
@@ -484,23 +474,36 @@ static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind)
484 } else if (kind == ne1619) { 474 } else if (kind == ne1619) {
485 name = "ne1619"; 475 name = "ne1619";
486 } 476 }
477 strlcpy(info->type, name, I2C_NAME_SIZE);
487 478
488 /* We can fill in the remaining client fields */ 479 return 0;
489 strlcpy(client->name, name, I2C_NAME_SIZE); 480}
490 mutex_init(&data->update_lock);
491 481
492 /* Tell the I2C layer a new client has arrived */ 482static int adm1025_probe(struct i2c_client *client,
493 if ((err = i2c_attach_client(client))) 483 const struct i2c_device_id *id)
494 goto exit_free; 484{
485 struct adm1025_data *data;
486 int err;
487 u8 config;
488
489 data = kzalloc(sizeof(struct adm1025_data), GFP_KERNEL);
490 if (!data) {
491 err = -ENOMEM;
492 goto exit;
493 }
494
495 i2c_set_clientdata(client, data);
496 mutex_init(&data->update_lock);
495 497
496 /* Initialize the ADM1025 chip */ 498 /* Initialize the ADM1025 chip */
497 adm1025_init_client(client); 499 adm1025_init_client(client);
498 500
499 /* Register sysfs hooks */ 501 /* Register sysfs hooks */
500 if ((err = sysfs_create_group(&client->dev.kobj, &adm1025_group))) 502 if ((err = sysfs_create_group(&client->dev.kobj, &adm1025_group)))
501 goto exit_detach; 503 goto exit_free;
502 504
503 /* Pin 11 is either in4 (+12V) or VID4 */ 505 /* Pin 11 is either in4 (+12V) or VID4 */
506 config = i2c_smbus_read_byte_data(client, ADM1025_REG_CONFIG);
504 if (!(config & 0x20)) { 507 if (!(config & 0x20)) {
505 if ((err = sysfs_create_group(&client->dev.kobj, 508 if ((err = sysfs_create_group(&client->dev.kobj,
506 &adm1025_group_in4))) 509 &adm1025_group_in4)))
@@ -518,8 +521,6 @@ static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind)
518exit_remove: 521exit_remove:
519 sysfs_remove_group(&client->dev.kobj, &adm1025_group); 522 sysfs_remove_group(&client->dev.kobj, &adm1025_group);
520 sysfs_remove_group(&client->dev.kobj, &adm1025_group_in4); 523 sysfs_remove_group(&client->dev.kobj, &adm1025_group_in4);
521exit_detach:
522 i2c_detach_client(client);
523exit_free: 524exit_free:
524 kfree(data); 525 kfree(data);
525exit: 526exit:
@@ -568,18 +569,14 @@ static void adm1025_init_client(struct i2c_client *client)
568 (reg&0x7E)|0x01); 569 (reg&0x7E)|0x01);
569} 570}
570 571
571static int adm1025_detach_client(struct i2c_client *client) 572static int adm1025_remove(struct i2c_client *client)
572{ 573{
573 struct adm1025_data *data = i2c_get_clientdata(client); 574 struct adm1025_data *data = i2c_get_clientdata(client);
574 int err;
575 575
576 hwmon_device_unregister(data->hwmon_dev); 576 hwmon_device_unregister(data->hwmon_dev);
577 sysfs_remove_group(&client->dev.kobj, &adm1025_group); 577 sysfs_remove_group(&client->dev.kobj, &adm1025_group);
578 sysfs_remove_group(&client->dev.kobj, &adm1025_group_in4); 578 sysfs_remove_group(&client->dev.kobj, &adm1025_group_in4);
579 579
580 if ((err = i2c_detach_client(client)))
581 return err;
582
583 kfree(data); 580 kfree(data);
584 return 0; 581 return 0;
585} 582}
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 904c6ce9d83f..7fe2441fc845 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -259,7 +259,6 @@ struct pwm_data {
259}; 259};
260 260
261struct adm1026_data { 261struct adm1026_data {
262 struct i2c_client client;
263 struct device *hwmon_dev; 262 struct device *hwmon_dev;
264 263
265 struct mutex update_lock; 264 struct mutex update_lock;
@@ -293,10 +292,11 @@ struct adm1026_data {
293 u8 config3; /* Register value */ 292 u8 config3; /* Register value */
294}; 293};
295 294
296static int adm1026_attach_adapter(struct i2c_adapter *adapter); 295static int adm1026_probe(struct i2c_client *client,
297static int adm1026_detect(struct i2c_adapter *adapter, int address, 296 const struct i2c_device_id *id);
298 int kind); 297static int adm1026_detect(struct i2c_client *client, int kind,
299static int adm1026_detach_client(struct i2c_client *client); 298 struct i2c_board_info *info);
299static int adm1026_remove(struct i2c_client *client);
300static int adm1026_read_value(struct i2c_client *client, u8 reg); 300static int adm1026_read_value(struct i2c_client *client, u8 reg);
301static int adm1026_write_value(struct i2c_client *client, u8 reg, int value); 301static int adm1026_write_value(struct i2c_client *client, u8 reg, int value);
302static void adm1026_print_gpio(struct i2c_client *client); 302static void adm1026_print_gpio(struct i2c_client *client);
@@ -305,22 +305,24 @@ static struct adm1026_data *adm1026_update_device(struct device *dev);
305static void adm1026_init_client(struct i2c_client *client); 305static void adm1026_init_client(struct i2c_client *client);
306 306
307 307
308static const struct i2c_device_id adm1026_id[] = {
309 { "adm1026", adm1026 },
310 { }
311};
312MODULE_DEVICE_TABLE(i2c, adm1026_id);
313
308static struct i2c_driver adm1026_driver = { 314static struct i2c_driver adm1026_driver = {
315 .class = I2C_CLASS_HWMON,
309 .driver = { 316 .driver = {
310 .name = "adm1026", 317 .name = "adm1026",
311 }, 318 },
312 .attach_adapter = adm1026_attach_adapter, 319 .probe = adm1026_probe,
313 .detach_client = adm1026_detach_client, 320 .remove = adm1026_remove,
321 .id_table = adm1026_id,
322 .detect = adm1026_detect,
323 .address_data = &addr_data,
314}; 324};
315 325
316static int adm1026_attach_adapter(struct i2c_adapter *adapter)
317{
318 if (!(adapter->class & I2C_CLASS_HWMON)) {
319 return 0;
320 }
321 return i2c_probe(adapter, &addr_data, adm1026_detect);
322}
323
324static int adm1026_read_value(struct i2c_client *client, u8 reg) 326static int adm1026_read_value(struct i2c_client *client, u8 reg)
325{ 327{
326 int res; 328 int res;
@@ -1647,48 +1649,32 @@ static const struct attribute_group adm1026_group_in8_9 = {
1647 .attrs = adm1026_attributes_in8_9, 1649 .attrs = adm1026_attributes_in8_9,
1648}; 1650};
1649 1651
1650static int adm1026_detect(struct i2c_adapter *adapter, int address, 1652/* Return 0 if detection is successful, -ENODEV otherwise */
1651 int kind) 1653static int adm1026_detect(struct i2c_client *client, int kind,
1654 struct i2c_board_info *info)
1652{ 1655{
1656 struct i2c_adapter *adapter = client->adapter;
1657 int address = client->addr;
1653 int company, verstep; 1658 int company, verstep;
1654 struct i2c_client *client;
1655 struct adm1026_data *data;
1656 int err = 0;
1657 const char *type_name = "";
1658 1659
1659 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 1660 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
1660 /* We need to be able to do byte I/O */ 1661 /* We need to be able to do byte I/O */
1661 goto exit; 1662 return -ENODEV;
1662 }; 1663 };
1663 1664
1664 /* OK. For now, we presume we have a valid client. We now create the
1665 client structure, even though we cannot fill it completely yet.
1666 But it allows us to access adm1026_{read,write}_value. */
1667
1668 if (!(data = kzalloc(sizeof(struct adm1026_data), GFP_KERNEL))) {
1669 err = -ENOMEM;
1670 goto exit;
1671 }
1672
1673 client = &data->client;
1674 i2c_set_clientdata(client, data);
1675 client->addr = address;
1676 client->adapter = adapter;
1677 client->driver = &adm1026_driver;
1678
1679 /* Now, we do the remaining detection. */ 1665 /* Now, we do the remaining detection. */
1680 1666
1681 company = adm1026_read_value(client, ADM1026_REG_COMPANY); 1667 company = adm1026_read_value(client, ADM1026_REG_COMPANY);
1682 verstep = adm1026_read_value(client, ADM1026_REG_VERSTEP); 1668 verstep = adm1026_read_value(client, ADM1026_REG_VERSTEP);
1683 1669
1684 dev_dbg(&client->dev, "Detecting device at %d,0x%02x with" 1670 dev_dbg(&adapter->dev, "Detecting device at %d,0x%02x with"
1685 " COMPANY: 0x%02x and VERSTEP: 0x%02x\n", 1671 " COMPANY: 0x%02x and VERSTEP: 0x%02x\n",
1686 i2c_adapter_id(client->adapter), client->addr, 1672 i2c_adapter_id(client->adapter), client->addr,
1687 company, verstep); 1673 company, verstep);
1688 1674
1689 /* If auto-detecting, Determine the chip type. */ 1675 /* If auto-detecting, Determine the chip type. */
1690 if (kind <= 0) { 1676 if (kind <= 0) {
1691 dev_dbg(&client->dev, "Autodetecting device at %d,0x%02x " 1677 dev_dbg(&adapter->dev, "Autodetecting device at %d,0x%02x "
1692 "...\n", i2c_adapter_id(adapter), address); 1678 "...\n", i2c_adapter_id(adapter), address);
1693 if (company == ADM1026_COMPANY_ANALOG_DEV 1679 if (company == ADM1026_COMPANY_ANALOG_DEV
1694 && verstep == ADM1026_VERSTEP_ADM1026) { 1680 && verstep == ADM1026_VERSTEP_ADM1026) {
@@ -1704,7 +1690,7 @@ static int adm1026_detect(struct i2c_adapter *adapter, int address,
1704 verstep); 1690 verstep);
1705 kind = any_chip; 1691 kind = any_chip;
1706 } else { 1692 } else {
1707 dev_dbg(&client->dev, ": Autodetection " 1693 dev_dbg(&adapter->dev, ": Autodetection "
1708 "failed\n"); 1694 "failed\n");
1709 /* Not an ADM1026 ... */ 1695 /* Not an ADM1026 ... */
1710 if (kind == 0) { /* User used force=x,y */ 1696 if (kind == 0) { /* User used force=x,y */
@@ -1713,33 +1699,29 @@ static int adm1026_detect(struct i2c_adapter *adapter, int address,
1713 "force_adm1026.\n", 1699 "force_adm1026.\n",
1714 i2c_adapter_id(adapter), address); 1700 i2c_adapter_id(adapter), address);
1715 } 1701 }
1716 goto exitfree; 1702 return -ENODEV;
1717 } 1703 }
1718 } 1704 }
1705 strlcpy(info->type, "adm1026", I2C_NAME_SIZE);
1719 1706
1720 /* Fill in the chip specific driver values */ 1707 return 0;
1721 switch (kind) { 1708}
1722 case any_chip : 1709
1723 type_name = "adm1026"; 1710static int adm1026_probe(struct i2c_client *client,
1724 break; 1711 const struct i2c_device_id *id)
1725 case adm1026 : 1712{
1726 type_name = "adm1026"; 1713 struct adm1026_data *data;
1727 break; 1714 int err;
1728 default : 1715
1729 dev_err(&adapter->dev, ": Internal error, invalid " 1716 data = kzalloc(sizeof(struct adm1026_data), GFP_KERNEL);
1730 "kind (%d)!\n", kind); 1717 if (!data) {
1731 err = -EFAULT; 1718 err = -ENOMEM;
1732 goto exitfree; 1719 goto exit;
1733 } 1720 }
1734 strlcpy(client->name, type_name, I2C_NAME_SIZE);
1735 1721
1736 /* Fill in the remaining client fields */ 1722 i2c_set_clientdata(client, data);
1737 mutex_init(&data->update_lock); 1723 mutex_init(&data->update_lock);
1738 1724
1739 /* Tell the I2C layer a new client has arrived */
1740 if ((err = i2c_attach_client(client)))
1741 goto exitfree;
1742
1743 /* Set the VRM version */ 1725 /* Set the VRM version */
1744 data->vrm = vid_which_vrm(); 1726 data->vrm = vid_which_vrm();
1745 1727
@@ -1748,7 +1730,7 @@ static int adm1026_detect(struct i2c_adapter *adapter, int address,
1748 1730
1749 /* Register sysfs hooks */ 1731 /* Register sysfs hooks */
1750 if ((err = sysfs_create_group(&client->dev.kobj, &adm1026_group))) 1732 if ((err = sysfs_create_group(&client->dev.kobj, &adm1026_group)))
1751 goto exitdetach; 1733 goto exitfree;
1752 if (data->config1 & CFG1_AIN8_9) 1734 if (data->config1 & CFG1_AIN8_9)
1753 err = sysfs_create_group(&client->dev.kobj, 1735 err = sysfs_create_group(&client->dev.kobj,
1754 &adm1026_group_in8_9); 1736 &adm1026_group_in8_9);
@@ -1773,15 +1755,13 @@ exitremove:
1773 sysfs_remove_group(&client->dev.kobj, &adm1026_group_in8_9); 1755 sysfs_remove_group(&client->dev.kobj, &adm1026_group_in8_9);
1774 else 1756 else
1775 sysfs_remove_group(&client->dev.kobj, &adm1026_group_temp3); 1757 sysfs_remove_group(&client->dev.kobj, &adm1026_group_temp3);
1776exitdetach:
1777 i2c_detach_client(client);
1778exitfree: 1758exitfree:
1779 kfree(data); 1759 kfree(data);
1780exit: 1760exit:
1781 return err; 1761 return err;
1782} 1762}
1783 1763
1784static int adm1026_detach_client(struct i2c_client *client) 1764static int adm1026_remove(struct i2c_client *client)
1785{ 1765{
1786 struct adm1026_data *data = i2c_get_clientdata(client); 1766 struct adm1026_data *data = i2c_get_clientdata(client);
1787 hwmon_device_unregister(data->hwmon_dev); 1767 hwmon_device_unregister(data->hwmon_dev);
@@ -1790,7 +1770,6 @@ static int adm1026_detach_client(struct i2c_client *client)
1790 sysfs_remove_group(&client->dev.kobj, &adm1026_group_in8_9); 1770 sysfs_remove_group(&client->dev.kobj, &adm1026_group_in8_9);
1791 else 1771 else
1792 sysfs_remove_group(&client->dev.kobj, &adm1026_group_temp3); 1772 sysfs_remove_group(&client->dev.kobj, &adm1026_group_temp3);
1793 i2c_detach_client(client);
1794 kfree(data); 1773 kfree(data);
1795 return 0; 1774 return 0;
1796} 1775}
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 2c6608d453c2..ba84ca5923f9 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -115,9 +115,11 @@ static const u8 ADM1029_REG_FAN_DIV[] = {
115 * Functions declaration 115 * Functions declaration
116 */ 116 */
117 117
118static int adm1029_attach_adapter(struct i2c_adapter *adapter); 118static int adm1029_probe(struct i2c_client *client,
119static int adm1029_detect(struct i2c_adapter *adapter, int address, int kind); 119 const struct i2c_device_id *id);
120static int adm1029_detach_client(struct i2c_client *client); 120static int adm1029_detect(struct i2c_client *client, int kind,
121 struct i2c_board_info *info);
122static int adm1029_remove(struct i2c_client *client);
121static struct adm1029_data *adm1029_update_device(struct device *dev); 123static struct adm1029_data *adm1029_update_device(struct device *dev);
122static int adm1029_init_client(struct i2c_client *client); 124static int adm1029_init_client(struct i2c_client *client);
123 125
@@ -125,12 +127,22 @@ static int adm1029_init_client(struct i2c_client *client);
125 * Driver data (common to all clients) 127 * Driver data (common to all clients)
126 */ 128 */
127 129
130static const struct i2c_device_id adm1029_id[] = {
131 { "adm1029", adm1029 },
132 { }
133};
134MODULE_DEVICE_TABLE(i2c, adm1029_id);
135
128static struct i2c_driver adm1029_driver = { 136static struct i2c_driver adm1029_driver = {
137 .class = I2C_CLASS_HWMON,
129 .driver = { 138 .driver = {
130 .name = "adm1029", 139 .name = "adm1029",
131 }, 140 },
132 .attach_adapter = adm1029_attach_adapter, 141 .probe = adm1029_probe,
133 .detach_client = adm1029_detach_client, 142 .remove = adm1029_remove,
143 .id_table = adm1029_id,
144 .detect = adm1029_detect,
145 .address_data = &addr_data,
134}; 146};
135 147
136/* 148/*
@@ -138,7 +150,6 @@ static struct i2c_driver adm1029_driver = {
138 */ 150 */
139 151
140struct adm1029_data { 152struct adm1029_data {
141 struct i2c_client client;
142 struct device *hwmon_dev; 153 struct device *hwmon_dev;
143 struct mutex update_lock; 154 struct mutex update_lock;
144 char valid; /* zero until following fields are valid */ 155 char valid; /* zero until following fields are valid */
@@ -284,37 +295,14 @@ static const struct attribute_group adm1029_group = {
284 * Real code 295 * Real code
285 */ 296 */
286 297
287static int adm1029_attach_adapter(struct i2c_adapter *adapter) 298/* Return 0 if detection is successful, -ENODEV otherwise */
299static int adm1029_detect(struct i2c_client *client, int kind,
300 struct i2c_board_info *info)
288{ 301{
289 if (!(adapter->class & I2C_CLASS_HWMON)) 302 struct i2c_adapter *adapter = client->adapter;
290 return 0;
291 return i2c_probe(adapter, &addr_data, adm1029_detect);
292}
293 303
294/*
295 * The following function does more than just detection. If detection
296 * succeeds, it also registers the new chip.
297 */
298
299static int adm1029_detect(struct i2c_adapter *adapter, int address, int kind)
300{
301 struct i2c_client *client;
302 struct adm1029_data *data;
303 int err = 0;
304 const char *name = "";
305 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 304 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
306 goto exit; 305 return -ENODEV;
307
308 if (!(data = kzalloc(sizeof(struct adm1029_data), GFP_KERNEL))) {
309 err = -ENOMEM;
310 goto exit;
311 }
312
313 client = &data->client;
314 i2c_set_clientdata(client, data);
315 client->addr = address;
316 client->adapter = adapter;
317 client->driver = &adm1029_driver;
318 306
319 /* Now we do the detection and identification. A negative kind 307 /* Now we do the detection and identification. A negative kind
320 * means that the driver was loaded with no force parameter 308 * means that the driver was loaded with no force parameter
@@ -362,32 +350,41 @@ static int adm1029_detect(struct i2c_adapter *adapter, int address, int kind)
362 if (kind <= 0) { /* identification failed */ 350 if (kind <= 0) { /* identification failed */
363 pr_debug("adm1029: Unsupported chip (man_id=0x%02X, " 351 pr_debug("adm1029: Unsupported chip (man_id=0x%02X, "
364 "chip_id=0x%02X)\n", man_id, chip_id); 352 "chip_id=0x%02X)\n", man_id, chip_id);
365 goto exit_free; 353 return -ENODEV;
366 } 354 }
367 } 355 }
356 strlcpy(info->type, "adm1029", I2C_NAME_SIZE);
368 357
369 if (kind == adm1029) { 358 return 0;
370 name = "adm1029"; 359}
360
361static int adm1029_probe(struct i2c_client *client,
362 const struct i2c_device_id *id)
363{
364 struct adm1029_data *data;
365 int err;
366
367 data = kzalloc(sizeof(struct adm1029_data), GFP_KERNEL);
368 if (!data) {
369 err = -ENOMEM;
370 goto exit;
371 } 371 }
372 372
373 /* We can fill in the remaining client fields */ 373 i2c_set_clientdata(client, data);
374 strlcpy(client->name, name, I2C_NAME_SIZE);
375 mutex_init(&data->update_lock); 374 mutex_init(&data->update_lock);
376 375
377 /* Tell the I2C layer a new client has arrived */
378 if ((err = i2c_attach_client(client)))
379 goto exit_free;
380
381 /* 376 /*
382 * Initialize the ADM1029 chip 377 * Initialize the ADM1029 chip
383 * Check config register 378 * Check config register
384 */ 379 */
385 if (adm1029_init_client(client) == 0) 380 if (adm1029_init_client(client) == 0) {
386 goto exit_detach; 381 err = -ENODEV;
382 goto exit_free;
383 }
387 384
388 /* Register sysfs hooks */ 385 /* Register sysfs hooks */
389 if ((err = sysfs_create_group(&client->dev.kobj, &adm1029_group))) 386 if ((err = sysfs_create_group(&client->dev.kobj, &adm1029_group)))
390 goto exit_detach; 387 goto exit_free;
391 388
392 data->hwmon_dev = hwmon_device_register(&client->dev); 389 data->hwmon_dev = hwmon_device_register(&client->dev);
393 if (IS_ERR(data->hwmon_dev)) { 390 if (IS_ERR(data->hwmon_dev)) {
@@ -399,8 +396,6 @@ static int adm1029_detect(struct i2c_adapter *adapter, int address, int kind)
399 396
400 exit_remove_files: 397 exit_remove_files:
401 sysfs_remove_group(&client->dev.kobj, &adm1029_group); 398 sysfs_remove_group(&client->dev.kobj, &adm1029_group);
402 exit_detach:
403 i2c_detach_client(client);
404 exit_free: 399 exit_free:
405 kfree(data); 400 kfree(data);
406 exit: 401 exit:
@@ -424,17 +419,13 @@ static int adm1029_init_client(struct i2c_client *client)
424 return 1; 419 return 1;
425} 420}
426 421
427static int adm1029_detach_client(struct i2c_client *client) 422static int adm1029_remove(struct i2c_client *client)
428{ 423{
429 struct adm1029_data *data = i2c_get_clientdata(client); 424 struct adm1029_data *data = i2c_get_clientdata(client);
430 int err;
431 425
432 hwmon_device_unregister(data->hwmon_dev); 426 hwmon_device_unregister(data->hwmon_dev);
433 sysfs_remove_group(&client->dev.kobj, &adm1029_group); 427 sysfs_remove_group(&client->dev.kobj, &adm1029_group);
434 428
435 if ((err = i2c_detach_client(client)))
436 return err;
437
438 kfree(data); 429 kfree(data);
439 return 0; 430 return 0;
440} 431}
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 2bffcab7dc9f..789441830cd8 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -70,7 +70,6 @@ typedef u8 auto_chan_table_t[8][2];
70 70
71/* Each client has this additional data */ 71/* Each client has this additional data */
72struct adm1031_data { 72struct adm1031_data {
73 struct i2c_client client;
74 struct device *hwmon_dev; 73 struct device *hwmon_dev;
75 struct mutex update_lock; 74 struct mutex update_lock;
76 int chip_type; 75 int chip_type;
@@ -99,19 +98,32 @@ struct adm1031_data {
99 s8 temp_crit[3]; 98 s8 temp_crit[3];
100}; 99};
101 100
102static int adm1031_attach_adapter(struct i2c_adapter *adapter); 101static int adm1031_probe(struct i2c_client *client,
103static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind); 102 const struct i2c_device_id *id);
103static int adm1031_detect(struct i2c_client *client, int kind,
104 struct i2c_board_info *info);
104static void adm1031_init_client(struct i2c_client *client); 105static void adm1031_init_client(struct i2c_client *client);
105static int adm1031_detach_client(struct i2c_client *client); 106static int adm1031_remove(struct i2c_client *client);
106static struct adm1031_data *adm1031_update_device(struct device *dev); 107static struct adm1031_data *adm1031_update_device(struct device *dev);
107 108
109static const struct i2c_device_id adm1031_id[] = {
110 { "adm1030", adm1030 },
111 { "adm1031", adm1031 },
112 { }
113};
114MODULE_DEVICE_TABLE(i2c, adm1031_id);
115
108/* This is the driver that will be inserted */ 116/* This is the driver that will be inserted */
109static struct i2c_driver adm1031_driver = { 117static struct i2c_driver adm1031_driver = {
118 .class = I2C_CLASS_HWMON,
110 .driver = { 119 .driver = {
111 .name = "adm1031", 120 .name = "adm1031",
112 }, 121 },
113 .attach_adapter = adm1031_attach_adapter, 122 .probe = adm1031_probe,
114 .detach_client = adm1031_detach_client, 123 .remove = adm1031_remove,
124 .id_table = adm1031_id,
125 .detect = adm1031_detect,
126 .address_data = &addr_data,
115}; 127};
116 128
117static inline u8 adm1031_read_value(struct i2c_client *client, u8 reg) 129static inline u8 adm1031_read_value(struct i2c_client *client, u8 reg)
@@ -693,13 +705,6 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
693static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13); 705static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
694static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14); 706static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
695 707
696static int adm1031_attach_adapter(struct i2c_adapter *adapter)
697{
698 if (!(adapter->class & I2C_CLASS_HWMON))
699 return 0;
700 return i2c_probe(adapter, &addr_data, adm1031_detect);
701}
702
703static struct attribute *adm1031_attributes[] = { 708static struct attribute *adm1031_attributes[] = {
704 &sensor_dev_attr_fan1_input.dev_attr.attr, 709 &sensor_dev_attr_fan1_input.dev_attr.attr,
705 &sensor_dev_attr_fan1_div.dev_attr.attr, 710 &sensor_dev_attr_fan1_div.dev_attr.attr,
@@ -770,27 +775,15 @@ static const struct attribute_group adm1031_group_opt = {
770 .attrs = adm1031_attributes_opt, 775 .attrs = adm1031_attributes_opt,
771}; 776};
772 777
773/* This function is called by i2c_probe */ 778/* Return 0 if detection is successful, -ENODEV otherwise */
774static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind) 779static int adm1031_detect(struct i2c_client *client, int kind,
780 struct i2c_board_info *info)
775{ 781{
776 struct i2c_client *client; 782 struct i2c_adapter *adapter = client->adapter;
777 struct adm1031_data *data;
778 int err = 0;
779 const char *name = ""; 783 const char *name = "";
780 784
781 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 785 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
782 goto exit; 786 return -ENODEV;
783
784 if (!(data = kzalloc(sizeof(struct adm1031_data), GFP_KERNEL))) {
785 err = -ENOMEM;
786 goto exit;
787 }
788
789 client = &data->client;
790 i2c_set_clientdata(client, data);
791 client->addr = address;
792 client->adapter = adapter;
793 client->driver = &adm1031_driver;
794 787
795 if (kind < 0) { 788 if (kind < 0) {
796 int id, co; 789 int id, co;
@@ -798,7 +791,7 @@ static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind)
798 co = i2c_smbus_read_byte_data(client, 0x3e); 791 co = i2c_smbus_read_byte_data(client, 0x3e);
799 792
800 if (!((id == 0x31 || id == 0x30) && co == 0x41)) 793 if (!((id == 0x31 || id == 0x30) && co == 0x41))
801 goto exit_free; 794 return -ENODEV;
802 kind = (id == 0x30) ? adm1030 : adm1031; 795 kind = (id == 0x30) ? adm1030 : adm1031;
803 } 796 }
804 797
@@ -809,28 +802,43 @@ static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind)
809 * auto fan control helper table. */ 802 * auto fan control helper table. */
810 if (kind == adm1030) { 803 if (kind == adm1030) {
811 name = "adm1030"; 804 name = "adm1030";
812 data->chan_select_table = &auto_channel_select_table_adm1030;
813 } else if (kind == adm1031) { 805 } else if (kind == adm1031) {
814 name = "adm1031"; 806 name = "adm1031";
815 data->chan_select_table = &auto_channel_select_table_adm1031;
816 } 807 }
817 data->chip_type = kind; 808 strlcpy(info->type, name, I2C_NAME_SIZE);
818 809
819 strlcpy(client->name, name, I2C_NAME_SIZE); 810 return 0;
811}
812
813static int adm1031_probe(struct i2c_client *client,
814 const struct i2c_device_id *id)
815{
816 struct adm1031_data *data;
817 int err;
818
819 data = kzalloc(sizeof(struct adm1031_data), GFP_KERNEL);
820 if (!data) {
821 err = -ENOMEM;
822 goto exit;
823 }
824
825 i2c_set_clientdata(client, data);
826 data->chip_type = id->driver_data;
820 mutex_init(&data->update_lock); 827 mutex_init(&data->update_lock);
821 828
822 /* Tell the I2C layer a new client has arrived */ 829 if (data->chip_type == adm1030)
823 if ((err = i2c_attach_client(client))) 830 data->chan_select_table = &auto_channel_select_table_adm1030;
824 goto exit_free; 831 else
832 data->chan_select_table = &auto_channel_select_table_adm1031;
825 833
826 /* Initialize the ADM1031 chip */ 834 /* Initialize the ADM1031 chip */
827 adm1031_init_client(client); 835 adm1031_init_client(client);
828 836
829 /* Register sysfs hooks */ 837 /* Register sysfs hooks */
830 if ((err = sysfs_create_group(&client->dev.kobj, &adm1031_group))) 838 if ((err = sysfs_create_group(&client->dev.kobj, &adm1031_group)))
831 goto exit_detach; 839 goto exit_free;
832 840
833 if (kind == adm1031) { 841 if (data->chip_type == adm1031) {
834 if ((err = sysfs_create_group(&client->dev.kobj, 842 if ((err = sysfs_create_group(&client->dev.kobj,
835 &adm1031_group_opt))) 843 &adm1031_group_opt)))
836 goto exit_remove; 844 goto exit_remove;
@@ -847,25 +855,19 @@ static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind)
847exit_remove: 855exit_remove:
848 sysfs_remove_group(&client->dev.kobj, &adm1031_group); 856 sysfs_remove_group(&client->dev.kobj, &adm1031_group);
849 sysfs_remove_group(&client->dev.kobj, &adm1031_group_opt); 857 sysfs_remove_group(&client->dev.kobj, &adm1031_group_opt);
850exit_detach:
851 i2c_detach_client(client);
852exit_free: 858exit_free:
853 kfree(data); 859 kfree(data);
854exit: 860exit:
855 return err; 861 return err;
856} 862}
857 863
858static int adm1031_detach_client(struct i2c_client *client) 864static int adm1031_remove(struct i2c_client *client)
859{ 865{
860 struct adm1031_data *data = i2c_get_clientdata(client); 866 struct adm1031_data *data = i2c_get_clientdata(client);
861 int ret;
862 867
863 hwmon_device_unregister(data->hwmon_dev); 868 hwmon_device_unregister(data->hwmon_dev);
864 sysfs_remove_group(&client->dev.kobj, &adm1031_group); 869 sysfs_remove_group(&client->dev.kobj, &adm1031_group);
865 sysfs_remove_group(&client->dev.kobj, &adm1031_group_opt); 870 sysfs_remove_group(&client->dev.kobj, &adm1031_group_opt);
866 if ((ret = i2c_detach_client(client)) != 0) {
867 return ret;
868 }
869 kfree(data); 871 kfree(data);
870 return 0; 872 return 0;
871} 873}
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 149ef25252e7..2444b15f2e9d 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -130,25 +130,37 @@ static inline unsigned int AOUT_FROM_REG(u8 reg)
130 return SCALE(reg, 1250, 255); 130 return SCALE(reg, 1250, 255);
131} 131}
132 132
133static int adm9240_attach_adapter(struct i2c_adapter *adapter); 133static int adm9240_probe(struct i2c_client *client,
134static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind); 134 const struct i2c_device_id *id);
135static int adm9240_detect(struct i2c_client *client, int kind,
136 struct i2c_board_info *info);
135static void adm9240_init_client(struct i2c_client *client); 137static void adm9240_init_client(struct i2c_client *client);
136static int adm9240_detach_client(struct i2c_client *client); 138static int adm9240_remove(struct i2c_client *client);
137static struct adm9240_data *adm9240_update_device(struct device *dev); 139static struct adm9240_data *adm9240_update_device(struct device *dev);
138 140
139/* driver data */ 141/* driver data */
142static const struct i2c_device_id adm9240_id[] = {
143 { "adm9240", adm9240 },
144 { "ds1780", ds1780 },
145 { "lm81", lm81 },
146 { }
147};
148MODULE_DEVICE_TABLE(i2c, adm9240_id);
149
140static struct i2c_driver adm9240_driver = { 150static struct i2c_driver adm9240_driver = {
151 .class = I2C_CLASS_HWMON,
141 .driver = { 152 .driver = {
142 .name = "adm9240", 153 .name = "adm9240",
143 }, 154 },
144 .attach_adapter = adm9240_attach_adapter, 155 .probe = adm9240_probe,
145 .detach_client = adm9240_detach_client, 156 .remove = adm9240_remove,
157 .id_table = adm9240_id,
158 .detect = adm9240_detect,
159 .address_data = &addr_data,
146}; 160};
147 161
148/* per client data */ 162/* per client data */
149struct adm9240_data { 163struct adm9240_data {
150 enum chips type;
151 struct i2c_client client;
152 struct device *hwmon_dev; 164 struct device *hwmon_dev;
153 struct mutex update_lock; 165 struct mutex update_lock;
154 char valid; 166 char valid;
@@ -532,28 +544,17 @@ static const struct attribute_group adm9240_group = {
532 544
533/*** sensor chip detect and driver install ***/ 545/*** sensor chip detect and driver install ***/
534 546
535static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind) 547/* Return 0 if detection is successful, -ENODEV otherwise */
548static int adm9240_detect(struct i2c_client *new_client, int kind,
549 struct i2c_board_info *info)
536{ 550{
537 struct i2c_client *new_client; 551 struct i2c_adapter *adapter = new_client->adapter;
538 struct adm9240_data *data;
539 int err = 0;
540 const char *name = ""; 552 const char *name = "";
553 int address = new_client->addr;
541 u8 man_id, die_rev; 554 u8 man_id, die_rev;
542 555
543 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 556 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
544 goto exit; 557 return -ENODEV;
545
546 if (!(data = kzalloc(sizeof(*data), GFP_KERNEL))) {
547 err = -ENOMEM;
548 goto exit;
549 }
550
551 new_client = &data->client;
552 i2c_set_clientdata(new_client, data);
553 new_client->addr = address;
554 new_client->adapter = adapter;
555 new_client->driver = &adm9240_driver;
556 new_client->flags = 0;
557 558
558 if (kind == 0) { 559 if (kind == 0) {
559 kind = adm9240; 560 kind = adm9240;
@@ -566,7 +567,7 @@ static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind)
566 != address) { 567 != address) {
567 dev_err(&adapter->dev, "detect fail: address match, " 568 dev_err(&adapter->dev, "detect fail: address match, "
568 "0x%02x\n", address); 569 "0x%02x\n", address);
569 goto exit_free; 570 return -ENODEV;
570 } 571 }
571 572
572 /* check known chip manufacturer */ 573 /* check known chip manufacturer */
@@ -581,7 +582,7 @@ static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind)
581 } else { 582 } else {
582 dev_err(&adapter->dev, "detect fail: unknown manuf, " 583 dev_err(&adapter->dev, "detect fail: unknown manuf, "
583 "0x%02x\n", man_id); 584 "0x%02x\n", man_id);
584 goto exit_free; 585 return -ENODEV;
585 } 586 }
586 587
587 /* successful detect, print chip info */ 588 /* successful detect, print chip info */
@@ -600,20 +601,31 @@ static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind)
600 } else if (kind == lm81) { 601 } else if (kind == lm81) {
601 name = "lm81"; 602 name = "lm81";
602 } 603 }
604 strlcpy(info->type, name, I2C_NAME_SIZE);
603 605
604 /* fill in the remaining client fields and attach */ 606 return 0;
605 strlcpy(new_client->name, name, I2C_NAME_SIZE); 607}
606 data->type = kind;
607 mutex_init(&data->update_lock);
608 608
609 if ((err = i2c_attach_client(new_client))) 609static int adm9240_probe(struct i2c_client *new_client,
610 goto exit_free; 610 const struct i2c_device_id *id)
611{
612 struct adm9240_data *data;
613 int err;
614
615 data = kzalloc(sizeof(*data), GFP_KERNEL);
616 if (!data) {
617 err = -ENOMEM;
618 goto exit;
619 }
620
621 i2c_set_clientdata(new_client, data);
622 mutex_init(&data->update_lock);
611 623
612 adm9240_init_client(new_client); 624 adm9240_init_client(new_client);
613 625
614 /* populate sysfs filesystem */ 626 /* populate sysfs filesystem */
615 if ((err = sysfs_create_group(&new_client->dev.kobj, &adm9240_group))) 627 if ((err = sysfs_create_group(&new_client->dev.kobj, &adm9240_group)))
616 goto exit_detach; 628 goto exit_free;
617 629
618 data->hwmon_dev = hwmon_device_register(&new_client->dev); 630 data->hwmon_dev = hwmon_device_register(&new_client->dev);
619 if (IS_ERR(data->hwmon_dev)) { 631 if (IS_ERR(data->hwmon_dev)) {
@@ -625,32 +637,19 @@ static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind)
625 637
626exit_remove: 638exit_remove:
627 sysfs_remove_group(&new_client->dev.kobj, &adm9240_group); 639 sysfs_remove_group(&new_client->dev.kobj, &adm9240_group);
628exit_detach:
629 i2c_detach_client(new_client);
630exit_free: 640exit_free:
631 kfree(data); 641 kfree(data);
632exit: 642exit:
633 return err; 643 return err;
634} 644}
635 645
636static int adm9240_attach_adapter(struct i2c_adapter *adapter) 646static int adm9240_remove(struct i2c_client *client)
637{
638 if (!(adapter->class & I2C_CLASS_HWMON))
639 return 0;
640 return i2c_probe(adapter, &addr_data, adm9240_detect);
641}
642
643static int adm9240_detach_client(struct i2c_client *client)
644{ 647{
645 struct adm9240_data *data = i2c_get_clientdata(client); 648 struct adm9240_data *data = i2c_get_clientdata(client);
646 int err;
647 649
648 hwmon_device_unregister(data->hwmon_dev); 650 hwmon_device_unregister(data->hwmon_dev);
649 sysfs_remove_group(&client->dev.kobj, &adm9240_group); 651 sysfs_remove_group(&client->dev.kobj, &adm9240_group);
650 652
651 if ((err = i2c_detach_client(client)))
652 return err;
653
654 kfree(data); 653 kfree(data);
655 return 0; 654 return 0;
656} 655}
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 5c8b6e0ff47c..5c39b4af1b23 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -64,7 +64,6 @@ static unsigned int ads7828_lsb_resol; /* resolution of the ADC sample lsb */
64 64
65/* Each client has this additional data */ 65/* Each client has this additional data */
66struct ads7828_data { 66struct ads7828_data {
67 struct i2c_client client;
68 struct device *hwmon_dev; 67 struct device *hwmon_dev;
69 struct mutex update_lock; /* mutex protect updates */ 68 struct mutex update_lock; /* mutex protect updates */
70 char valid; /* !=0 if following fields are valid */ 69 char valid; /* !=0 if following fields are valid */
@@ -73,7 +72,10 @@ struct ads7828_data {
73}; 72};
74 73
75/* Function declaration - necessary due to function dependencies */ 74/* Function declaration - necessary due to function dependencies */
76static int ads7828_detect(struct i2c_adapter *adapter, int address, int kind); 75static int ads7828_detect(struct i2c_client *client, int kind,
76 struct i2c_board_info *info);
77static int ads7828_probe(struct i2c_client *client,
78 const struct i2c_device_id *id);
77 79
78/* The ADS7828 returns the 12-bit sample in two bytes, 80/* The ADS7828 returns the 12-bit sample in two bytes,
79 these are read as a word then byte-swapped */ 81 these are read as a word then byte-swapped */
@@ -156,58 +158,43 @@ static const struct attribute_group ads7828_group = {
156 .attrs = ads7828_attributes, 158 .attrs = ads7828_attributes,
157}; 159};
158 160
159static int ads7828_attach_adapter(struct i2c_adapter *adapter) 161static int ads7828_remove(struct i2c_client *client)
160{
161 if (!(adapter->class & I2C_CLASS_HWMON))
162 return 0;
163 return i2c_probe(adapter, &addr_data, ads7828_detect);
164}
165
166static int ads7828_detach_client(struct i2c_client *client)
167{ 162{
168 struct ads7828_data *data = i2c_get_clientdata(client); 163 struct ads7828_data *data = i2c_get_clientdata(client);
169 hwmon_device_unregister(data->hwmon_dev); 164 hwmon_device_unregister(data->hwmon_dev);
170 sysfs_remove_group(&client->dev.kobj, &ads7828_group); 165 sysfs_remove_group(&client->dev.kobj, &ads7828_group);
171 i2c_detach_client(client);
172 kfree(i2c_get_clientdata(client)); 166 kfree(i2c_get_clientdata(client));
173 return 0; 167 return 0;
174} 168}
175 169
170static const struct i2c_device_id ads7828_id[] = {
171 { "ads7828", ads7828 },
172 { }
173};
174MODULE_DEVICE_TABLE(i2c, ads7828_id);
175
176/* This is the driver that will be inserted */ 176/* This is the driver that will be inserted */
177static struct i2c_driver ads7828_driver = { 177static struct i2c_driver ads7828_driver = {
178 .class = I2C_CLASS_HWMON,
178 .driver = { 179 .driver = {
179 .name = "ads7828", 180 .name = "ads7828",
180 }, 181 },
181 .attach_adapter = ads7828_attach_adapter, 182 .probe = ads7828_probe,
182 .detach_client = ads7828_detach_client, 183 .remove = ads7828_remove,
184 .id_table = ads7828_id,
185 .detect = ads7828_detect,
186 .address_data = &addr_data,
183}; 187};
184 188
185/* This function is called by i2c_probe */ 189/* Return 0 if detection is successful, -ENODEV otherwise */
186static int ads7828_detect(struct i2c_adapter *adapter, int address, int kind) 190static int ads7828_detect(struct i2c_client *client, int kind,
191 struct i2c_board_info *info)
187{ 192{
188 struct i2c_client *client; 193 struct i2c_adapter *adapter = client->adapter;
189 struct ads7828_data *data;
190 int err = 0;
191 const char *name = "";
192 194
193 /* Check we have a valid client */ 195 /* Check we have a valid client */
194 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)) 196 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_WORD_DATA))
195 goto exit; 197 return -ENODEV;
196
197 /* OK. For now, we presume we have a valid client. We now create the
198 client structure, even though we cannot fill it completely yet.
199 But it allows us to access ads7828_read_value. */
200 data = kzalloc(sizeof(struct ads7828_data), GFP_KERNEL);
201 if (!data) {
202 err = -ENOMEM;
203 goto exit;
204 }
205
206 client = &data->client;
207 i2c_set_clientdata(client, data);
208 client->addr = address;
209 client->adapter = adapter;
210 client->driver = &ads7828_driver;
211 198
212 /* Now, we do the remaining detection. There is no identification 199 /* Now, we do the remaining detection. There is no identification
213 dedicated register so attempt to sanity check using knowledge of 200 dedicated register so attempt to sanity check using knowledge of
@@ -225,32 +212,34 @@ static int ads7828_detect(struct i2c_adapter *adapter, int address, int kind)
225 printk(KERN_DEBUG 212 printk(KERN_DEBUG
226 "%s : Doesn't look like an ads7828 device\n", 213 "%s : Doesn't look like an ads7828 device\n",
227 __func__); 214 __func__);
228 goto exit_free; 215 return -ENODEV;
229 } 216 }
230 } 217 }
231 } 218 }
219 strlcpy(info->type, "ads7828", I2C_NAME_SIZE);
232 220
233 /* Determine the chip type - only one kind supported! */ 221 return 0;
234 if (kind <= 0) 222}
235 kind = ads7828;
236 223
237 if (kind == ads7828) 224static int ads7828_probe(struct i2c_client *client,
238 name = "ads7828"; 225 const struct i2c_device_id *id)
226{
227 struct ads7828_data *data;
228 int err;
239 229
240 /* Fill in the remaining client fields, put it into the global list */ 230 data = kzalloc(sizeof(struct ads7828_data), GFP_KERNEL);
241 strlcpy(client->name, name, I2C_NAME_SIZE); 231 if (!data) {
232 err = -ENOMEM;
233 goto exit;
234 }
242 235
236 i2c_set_clientdata(client, data);
243 mutex_init(&data->update_lock); 237 mutex_init(&data->update_lock);
244 238
245 /* Tell the I2C layer a new client has arrived */
246 err = i2c_attach_client(client);
247 if (err)
248 goto exit_free;
249
250 /* Register sysfs hooks */ 239 /* Register sysfs hooks */
251 err = sysfs_create_group(&client->dev.kobj, &ads7828_group); 240 err = sysfs_create_group(&client->dev.kobj, &ads7828_group);
252 if (err) 241 if (err)
253 goto exit_detach; 242 goto exit_free;
254 243
255 data->hwmon_dev = hwmon_device_register(&client->dev); 244 data->hwmon_dev = hwmon_device_register(&client->dev);
256 if (IS_ERR(data->hwmon_dev)) { 245 if (IS_ERR(data->hwmon_dev)) {
@@ -262,8 +251,6 @@ static int ads7828_detect(struct i2c_adapter *adapter, int address, int kind)
262 251
263exit_remove: 252exit_remove:
264 sysfs_remove_group(&client->dev.kobj, &ads7828_group); 253 sysfs_remove_group(&client->dev.kobj, &ads7828_group);
265exit_detach:
266 i2c_detach_client(client);
267exit_free: 254exit_free:
268 kfree(data); 255 kfree(data);
269exit: 256exit:
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 6b5325f33a2c..d368d8f845e1 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -138,7 +138,6 @@ I2C_CLIENT_INSMOD_1(adt7470);
138#define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID) 138#define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID)
139 139
140struct adt7470_data { 140struct adt7470_data {
141 struct i2c_client client;
142 struct device *hwmon_dev; 141 struct device *hwmon_dev;
143 struct attribute_group attrs; 142 struct attribute_group attrs;
144 struct mutex lock; 143 struct mutex lock;
@@ -164,16 +163,28 @@ struct adt7470_data {
164 u8 pwm_auto_temp[ADT7470_PWM_COUNT]; 163 u8 pwm_auto_temp[ADT7470_PWM_COUNT];
165}; 164};
166 165
167static int adt7470_attach_adapter(struct i2c_adapter *adapter); 166static int adt7470_probe(struct i2c_client *client,
168static int adt7470_detect(struct i2c_adapter *adapter, int address, int kind); 167 const struct i2c_device_id *id);
169static int adt7470_detach_client(struct i2c_client *client); 168static int adt7470_detect(struct i2c_client *client, int kind,
169 struct i2c_board_info *info);
170static int adt7470_remove(struct i2c_client *client);
171
172static const struct i2c_device_id adt7470_id[] = {
173 { "adt7470", adt7470 },
174 { }
175};
176MODULE_DEVICE_TABLE(i2c, adt7470_id);
170 177
171static struct i2c_driver adt7470_driver = { 178static struct i2c_driver adt7470_driver = {
179 .class = I2C_CLASS_HWMON,
172 .driver = { 180 .driver = {
173 .name = "adt7470", 181 .name = "adt7470",
174 }, 182 },
175 .attach_adapter = adt7470_attach_adapter, 183 .probe = adt7470_probe,
176 .detach_client = adt7470_detach_client, 184 .remove = adt7470_remove,
185 .id_table = adt7470_id,
186 .detect = adt7470_detect,
187 .address_data = &addr_data,
177}; 188};
178 189
179/* 190/*
@@ -1004,64 +1015,52 @@ static struct attribute *adt7470_attr[] =
1004 NULL 1015 NULL
1005}; 1016};
1006 1017
1007static int adt7470_attach_adapter(struct i2c_adapter *adapter) 1018/* Return 0 if detection is successful, -ENODEV otherwise */
1008{ 1019static int adt7470_detect(struct i2c_client *client, int kind,
1009 if (!(adapter->class & I2C_CLASS_HWMON)) 1020 struct i2c_board_info *info)
1010 return 0;
1011 return i2c_probe(adapter, &addr_data, adt7470_detect);
1012}
1013
1014static int adt7470_detect(struct i2c_adapter *adapter, int address, int kind)
1015{ 1021{
1016 struct i2c_client *client; 1022 struct i2c_adapter *adapter = client->adapter;
1017 struct adt7470_data *data;
1018 int err = 0;
1019 1023
1020 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 1024 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
1021 goto exit; 1025 return -ENODEV;
1022
1023 if (!(data = kzalloc(sizeof(struct adt7470_data), GFP_KERNEL))) {
1024 err = -ENOMEM;
1025 goto exit;
1026 }
1027
1028 client = &data->client;
1029 client->addr = address;
1030 client->adapter = adapter;
1031 client->driver = &adt7470_driver;
1032
1033 i2c_set_clientdata(client, data);
1034
1035 mutex_init(&data->lock);
1036 1026
1037 if (kind <= 0) { 1027 if (kind <= 0) {
1038 int vendor, device, revision; 1028 int vendor, device, revision;
1039 1029
1040 vendor = i2c_smbus_read_byte_data(client, ADT7470_REG_VENDOR); 1030 vendor = i2c_smbus_read_byte_data(client, ADT7470_REG_VENDOR);
1041 if (vendor != ADT7470_VENDOR) { 1031 if (vendor != ADT7470_VENDOR)
1042 err = -ENODEV; 1032 return -ENODEV;
1043 goto exit_free;
1044 }
1045 1033
1046 device = i2c_smbus_read_byte_data(client, ADT7470_REG_DEVICE); 1034 device = i2c_smbus_read_byte_data(client, ADT7470_REG_DEVICE);
1047 if (device != ADT7470_DEVICE) { 1035 if (device != ADT7470_DEVICE)
1048 err = -ENODEV; 1036 return -ENODEV;
1049 goto exit_free;
1050 }
1051 1037
1052 revision = i2c_smbus_read_byte_data(client, 1038 revision = i2c_smbus_read_byte_data(client,
1053 ADT7470_REG_REVISION); 1039 ADT7470_REG_REVISION);
1054 if (revision != ADT7470_REVISION) { 1040 if (revision != ADT7470_REVISION)
1055 err = -ENODEV; 1041 return -ENODEV;
1056 goto exit_free;
1057 }
1058 } else 1042 } else
1059 dev_dbg(&adapter->dev, "detection forced\n"); 1043 dev_dbg(&adapter->dev, "detection forced\n");
1060 1044
1061 strlcpy(client->name, "adt7470", I2C_NAME_SIZE); 1045 strlcpy(info->type, "adt7470", I2C_NAME_SIZE);
1062 1046
1063 if ((err = i2c_attach_client(client))) 1047 return 0;
1064 goto exit_free; 1048}
1049
1050static int adt7470_probe(struct i2c_client *client,
1051 const struct i2c_device_id *id)
1052{
1053 struct adt7470_data *data;
1054 int err;
1055
1056 data = kzalloc(sizeof(struct adt7470_data), GFP_KERNEL);
1057 if (!data) {
1058 err = -ENOMEM;
1059 goto exit;
1060 }
1061
1062 i2c_set_clientdata(client, data);
1063 mutex_init(&data->lock);
1065 1064
1066 dev_info(&client->dev, "%s chip found\n", client->name); 1065 dev_info(&client->dev, "%s chip found\n", client->name);
1067 1066
@@ -1071,7 +1070,7 @@ static int adt7470_detect(struct i2c_adapter *adapter, int address, int kind)
1071 /* Register sysfs hooks */ 1070 /* Register sysfs hooks */
1072 data->attrs.attrs = adt7470_attr; 1071 data->attrs.attrs = adt7470_attr;
1073 if ((err = sysfs_create_group(&client->dev.kobj, &data->attrs))) 1072 if ((err = sysfs_create_group(&client->dev.kobj, &data->attrs)))
1074 goto exit_detach; 1073 goto exit_free;
1075 1074
1076 data->hwmon_dev = hwmon_device_register(&client->dev); 1075 data->hwmon_dev = hwmon_device_register(&client->dev);
1077 if (IS_ERR(data->hwmon_dev)) { 1076 if (IS_ERR(data->hwmon_dev)) {
@@ -1083,21 +1082,18 @@ static int adt7470_detect(struct i2c_adapter *adapter, int address, int kind)
1083 1082
1084exit_remove: 1083exit_remove:
1085 sysfs_remove_group(&client->dev.kobj, &data->attrs); 1084 sysfs_remove_group(&client->dev.kobj, &data->attrs);
1086exit_detach:
1087 i2c_detach_client(client);
1088exit_free: 1085exit_free:
1089 kfree(data); 1086 kfree(data);
1090exit: 1087exit:
1091 return err; 1088 return err;
1092} 1089}
1093 1090
1094static int adt7470_detach_client(struct i2c_client *client) 1091static int adt7470_remove(struct i2c_client *client)
1095{ 1092{
1096 struct adt7470_data *data = i2c_get_clientdata(client); 1093 struct adt7470_data *data = i2c_get_clientdata(client);
1097 1094
1098 hwmon_device_unregister(data->hwmon_dev); 1095 hwmon_device_unregister(data->hwmon_dev);
1099 sysfs_remove_group(&client->dev.kobj, &data->attrs); 1096 sysfs_remove_group(&client->dev.kobj, &data->attrs);
1100 i2c_detach_client(client);
1101 kfree(data); 1097 kfree(data);
1102 return 0; 1098 return 0;
1103} 1099}
diff --git a/drivers/hwmon/adt7473.c b/drivers/hwmon/adt7473.c
index 93dbf5e7ff8a..ce4a7cb5a116 100644
--- a/drivers/hwmon/adt7473.c
+++ b/drivers/hwmon/adt7473.c
@@ -143,7 +143,6 @@ I2C_CLIENT_INSMOD_1(adt7473);
143#define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID) 143#define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID)
144 144
145struct adt7473_data { 145struct adt7473_data {
146 struct i2c_client client;
147 struct device *hwmon_dev; 146 struct device *hwmon_dev;
148 struct attribute_group attrs; 147 struct attribute_group attrs;
149 struct mutex lock; 148 struct mutex lock;
@@ -178,16 +177,28 @@ struct adt7473_data {
178 u8 max_duty_at_overheat; 177 u8 max_duty_at_overheat;
179}; 178};
180 179
181static int adt7473_attach_adapter(struct i2c_adapter *adapter); 180static int adt7473_probe(struct i2c_client *client,
182static int adt7473_detect(struct i2c_adapter *adapter, int address, int kind); 181 const struct i2c_device_id *id);
183static int adt7473_detach_client(struct i2c_client *client); 182static int adt7473_detect(struct i2c_client *client, int kind,
183 struct i2c_board_info *info);
184static int adt7473_remove(struct i2c_client *client);
185
186static const struct i2c_device_id adt7473_id[] = {
187 { "adt7473", adt7473 },
188 { }
189};
190MODULE_DEVICE_TABLE(i2c, adt7473_id);
184 191
185static struct i2c_driver adt7473_driver = { 192static struct i2c_driver adt7473_driver = {
193 .class = I2C_CLASS_HWMON,
186 .driver = { 194 .driver = {
187 .name = "adt7473", 195 .name = "adt7473",
188 }, 196 },
189 .attach_adapter = adt7473_attach_adapter, 197 .probe = adt7473_probe,
190 .detach_client = adt7473_detach_client, 198 .remove = adt7473_remove,
199 .id_table = adt7473_id,
200 .detect = adt7473_detect,
201 .address_data = &addr_data,
191}; 202};
192 203
193/* 204/*
@@ -1042,66 +1053,52 @@ static struct attribute *adt7473_attr[] =
1042 NULL 1053 NULL
1043}; 1054};
1044 1055
1045static int adt7473_attach_adapter(struct i2c_adapter *adapter) 1056/* Return 0 if detection is successful, -ENODEV otherwise */
1057static int adt7473_detect(struct i2c_client *client, int kind,
1058 struct i2c_board_info *info)
1046{ 1059{
1047 if (!(adapter->class & I2C_CLASS_HWMON)) 1060 struct i2c_adapter *adapter = client->adapter;
1048 return 0;
1049 return i2c_probe(adapter, &addr_data, adt7473_detect);
1050}
1051
1052static int adt7473_detect(struct i2c_adapter *adapter, int address, int kind)
1053{
1054 struct i2c_client *client;
1055 struct adt7473_data *data;
1056 int err = 0;
1057 1061
1058 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 1062 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
1059 goto exit; 1063 return -ENODEV;
1060
1061 data = kzalloc(sizeof(struct adt7473_data), GFP_KERNEL);
1062 if (!data) {
1063 err = -ENOMEM;
1064 goto exit;
1065 }
1066
1067 client = &data->client;
1068 client->addr = address;
1069 client->adapter = adapter;
1070 client->driver = &adt7473_driver;
1071
1072 i2c_set_clientdata(client, data);
1073
1074 mutex_init(&data->lock);
1075 1064
1076 if (kind <= 0) { 1065 if (kind <= 0) {
1077 int vendor, device, revision; 1066 int vendor, device, revision;
1078 1067
1079 vendor = i2c_smbus_read_byte_data(client, ADT7473_REG_VENDOR); 1068 vendor = i2c_smbus_read_byte_data(client, ADT7473_REG_VENDOR);
1080 if (vendor != ADT7473_VENDOR) { 1069 if (vendor != ADT7473_VENDOR)
1081 err = -ENODEV; 1070 return -ENODEV;
1082 goto exit_free;
1083 }
1084 1071
1085 device = i2c_smbus_read_byte_data(client, ADT7473_REG_DEVICE); 1072 device = i2c_smbus_read_byte_data(client, ADT7473_REG_DEVICE);
1086 if (device != ADT7473_DEVICE) { 1073 if (device != ADT7473_DEVICE)
1087 err = -ENODEV; 1074 return -ENODEV;
1088 goto exit_free;
1089 }
1090 1075
1091 revision = i2c_smbus_read_byte_data(client, 1076 revision = i2c_smbus_read_byte_data(client,
1092 ADT7473_REG_REVISION); 1077 ADT7473_REG_REVISION);
1093 if (revision != ADT7473_REV_68 && revision != ADT7473_REV_69) { 1078 if (revision != ADT7473_REV_68 && revision != ADT7473_REV_69)
1094 err = -ENODEV; 1079 return -ENODEV;
1095 goto exit_free;
1096 }
1097 } else 1080 } else
1098 dev_dbg(&adapter->dev, "detection forced\n"); 1081 dev_dbg(&adapter->dev, "detection forced\n");
1099 1082
1100 strlcpy(client->name, "adt7473", I2C_NAME_SIZE); 1083 strlcpy(info->type, "adt7473", I2C_NAME_SIZE);
1101 1084
1102 err = i2c_attach_client(client); 1085 return 0;
1103 if (err) 1086}
1104 goto exit_free; 1087
1088static int adt7473_probe(struct i2c_client *client,
1089 const struct i2c_device_id *id)
1090{
1091 struct adt7473_data *data;
1092 int err;
1093
1094 data = kzalloc(sizeof(struct adt7473_data), GFP_KERNEL);
1095 if (!data) {
1096 err = -ENOMEM;
1097 goto exit;
1098 }
1099
1100 i2c_set_clientdata(client, data);
1101 mutex_init(&data->lock);
1105 1102
1106 dev_info(&client->dev, "%s chip found\n", client->name); 1103 dev_info(&client->dev, "%s chip found\n", client->name);
1107 1104
@@ -1112,7 +1109,7 @@ static int adt7473_detect(struct i2c_adapter *adapter, int address, int kind)
1112 data->attrs.attrs = adt7473_attr; 1109 data->attrs.attrs = adt7473_attr;
1113 err = sysfs_create_group(&client->dev.kobj, &data->attrs); 1110 err = sysfs_create_group(&client->dev.kobj, &data->attrs);
1114 if (err) 1111 if (err)
1115 goto exit_detach; 1112 goto exit_free;
1116 1113
1117 data->hwmon_dev = hwmon_device_register(&client->dev); 1114 data->hwmon_dev = hwmon_device_register(&client->dev);
1118 if (IS_ERR(data->hwmon_dev)) { 1115 if (IS_ERR(data->hwmon_dev)) {
@@ -1124,21 +1121,18 @@ static int adt7473_detect(struct i2c_adapter *adapter, int address, int kind)
1124 1121
1125exit_remove: 1122exit_remove:
1126 sysfs_remove_group(&client->dev.kobj, &data->attrs); 1123 sysfs_remove_group(&client->dev.kobj, &data->attrs);
1127exit_detach:
1128 i2c_detach_client(client);
1129exit_free: 1124exit_free:
1130 kfree(data); 1125 kfree(data);
1131exit: 1126exit:
1132 return err; 1127 return err;
1133} 1128}
1134 1129
1135static int adt7473_detach_client(struct i2c_client *client) 1130static int adt7473_remove(struct i2c_client *client)
1136{ 1131{
1137 struct adt7473_data *data = i2c_get_clientdata(client); 1132 struct adt7473_data *data = i2c_get_clientdata(client);
1138 1133
1139 hwmon_device_unregister(data->hwmon_dev); 1134 hwmon_device_unregister(data->hwmon_dev);
1140 sysfs_remove_group(&client->dev.kobj, &data->attrs); 1135 sysfs_remove_group(&client->dev.kobj, &data->attrs);
1141 i2c_detach_client(client);
1142 kfree(data); 1136 kfree(data);
1143 return 0; 1137 return 0;
1144} 1138}
diff --git a/drivers/hwmon/ams/ams-core.c b/drivers/hwmon/ams/ams-core.c
index a112a03e8f29..fbefa82a015c 100644
--- a/drivers/hwmon/ams/ams-core.c
+++ b/drivers/hwmon/ams/ams-core.c
@@ -23,8 +23,8 @@
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/of_platform.h>
26#include <asm/pmac_pfunc.h> 27#include <asm/pmac_pfunc.h>
27#include <asm/of_platform.h>
28 28
29#include "ams.h" 29#include "ams.h"
30 30
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index fe2eea4d799b..8a45a2e6ba8a 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -176,10 +176,8 @@ static u8 DIV_TO_REG(long val)
176 data is pointed to by client->data. The structure itself is 176 data is pointed to by client->data. The structure itself is
177 dynamically allocated, at the same time the client itself is allocated. */ 177 dynamically allocated, at the same time the client itself is allocated. */
178struct asb100_data { 178struct asb100_data {
179 struct i2c_client client;
180 struct device *hwmon_dev; 179 struct device *hwmon_dev;
181 struct mutex lock; 180 struct mutex lock;
182 enum chips type;
183 181
184 struct mutex update_lock; 182 struct mutex update_lock;
185 unsigned long last_updated; /* In jiffies */ 183 unsigned long last_updated; /* In jiffies */
@@ -206,18 +204,30 @@ struct asb100_data {
206static int asb100_read_value(struct i2c_client *client, u16 reg); 204static int asb100_read_value(struct i2c_client *client, u16 reg);
207static void asb100_write_value(struct i2c_client *client, u16 reg, u16 val); 205static void asb100_write_value(struct i2c_client *client, u16 reg, u16 val);
208 206
209static int asb100_attach_adapter(struct i2c_adapter *adapter); 207static int asb100_probe(struct i2c_client *client,
210static int asb100_detect(struct i2c_adapter *adapter, int address, int kind); 208 const struct i2c_device_id *id);
211static int asb100_detach_client(struct i2c_client *client); 209static int asb100_detect(struct i2c_client *client, int kind,
210 struct i2c_board_info *info);
211static int asb100_remove(struct i2c_client *client);
212static struct asb100_data *asb100_update_device(struct device *dev); 212static struct asb100_data *asb100_update_device(struct device *dev);
213static void asb100_init_client(struct i2c_client *client); 213static void asb100_init_client(struct i2c_client *client);
214 214
215static const struct i2c_device_id asb100_id[] = {
216 { "asb100", asb100 },
217 { }
218};
219MODULE_DEVICE_TABLE(i2c, asb100_id);
220
215static struct i2c_driver asb100_driver = { 221static struct i2c_driver asb100_driver = {
222 .class = I2C_CLASS_HWMON,
216 .driver = { 223 .driver = {
217 .name = "asb100", 224 .name = "asb100",
218 }, 225 },
219 .attach_adapter = asb100_attach_adapter, 226 .probe = asb100_probe,
220 .detach_client = asb100_detach_client, 227 .remove = asb100_remove,
228 .id_table = asb100_id,
229 .detect = asb100_detect,
230 .address_data = &addr_data,
221}; 231};
222 232
223/* 7 Voltages */ 233/* 7 Voltages */
@@ -619,35 +629,13 @@ static const struct attribute_group asb100_group = {
619 .attrs = asb100_attributes, 629 .attrs = asb100_attributes,
620}; 630};
621 631
622/* This function is called when: 632static int asb100_detect_subclients(struct i2c_client *client)
623 asb100_driver is inserted (when this module is loaded), for each
624 available adapter
625 when a new adapter is inserted (and asb100_driver is still present)
626 */
627static int asb100_attach_adapter(struct i2c_adapter *adapter)
628{
629 if (!(adapter->class & I2C_CLASS_HWMON))
630 return 0;
631 return i2c_probe(adapter, &addr_data, asb100_detect);
632}
633
634static int asb100_detect_subclients(struct i2c_adapter *adapter, int address,
635 int kind, struct i2c_client *client)
636{ 633{
637 int i, id, err; 634 int i, id, err;
635 int address = client->addr;
636 unsigned short sc_addr[2];
638 struct asb100_data *data = i2c_get_clientdata(client); 637 struct asb100_data *data = i2c_get_clientdata(client);
639 638 struct i2c_adapter *adapter = client->adapter;
640 data->lm75[0] = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
641 if (!(data->lm75[0])) {
642 err = -ENOMEM;
643 goto ERROR_SC_0;
644 }
645
646 data->lm75[1] = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
647 if (!(data->lm75[1])) {
648 err = -ENOMEM;
649 goto ERROR_SC_1;
650 }
651 639
652 id = i2c_adapter_id(adapter); 640 id = i2c_adapter_id(adapter);
653 641
@@ -665,37 +653,34 @@ static int asb100_detect_subclients(struct i2c_adapter *adapter, int address,
665 asb100_write_value(client, ASB100_REG_I2C_SUBADDR, 653 asb100_write_value(client, ASB100_REG_I2C_SUBADDR,
666 (force_subclients[2] & 0x07) | 654 (force_subclients[2] & 0x07) |
667 ((force_subclients[3] & 0x07) << 4)); 655 ((force_subclients[3] & 0x07) << 4));
668 data->lm75[0]->addr = force_subclients[2]; 656 sc_addr[0] = force_subclients[2];
669 data->lm75[1]->addr = force_subclients[3]; 657 sc_addr[1] = force_subclients[3];
670 } else { 658 } else {
671 int val = asb100_read_value(client, ASB100_REG_I2C_SUBADDR); 659 int val = asb100_read_value(client, ASB100_REG_I2C_SUBADDR);
672 data->lm75[0]->addr = 0x48 + (val & 0x07); 660 sc_addr[0] = 0x48 + (val & 0x07);
673 data->lm75[1]->addr = 0x48 + ((val >> 4) & 0x07); 661 sc_addr[1] = 0x48 + ((val >> 4) & 0x07);
674 } 662 }
675 663
676 if (data->lm75[0]->addr == data->lm75[1]->addr) { 664 if (sc_addr[0] == sc_addr[1]) {
677 dev_err(&client->dev, "duplicate addresses 0x%x " 665 dev_err(&client->dev, "duplicate addresses 0x%x "
678 "for subclients\n", data->lm75[0]->addr); 666 "for subclients\n", sc_addr[0]);
679 err = -ENODEV; 667 err = -ENODEV;
680 goto ERROR_SC_2; 668 goto ERROR_SC_2;
681 } 669 }
682 670
683 for (i = 0; i <= 1; i++) { 671 data->lm75[0] = i2c_new_dummy(adapter, sc_addr[0]);
684 i2c_set_clientdata(data->lm75[i], NULL); 672 if (!data->lm75[0]) {
685 data->lm75[i]->adapter = adapter;
686 data->lm75[i]->driver = &asb100_driver;
687 strlcpy(data->lm75[i]->name, "asb100 subclient", I2C_NAME_SIZE);
688 }
689
690 if ((err = i2c_attach_client(data->lm75[0]))) {
691 dev_err(&client->dev, "subclient %d registration " 673 dev_err(&client->dev, "subclient %d registration "
692 "at address 0x%x failed.\n", i, data->lm75[0]->addr); 674 "at address 0x%x failed.\n", 1, sc_addr[0]);
675 err = -ENOMEM;
693 goto ERROR_SC_2; 676 goto ERROR_SC_2;
694 } 677 }
695 678
696 if ((err = i2c_attach_client(data->lm75[1]))) { 679 data->lm75[1] = i2c_new_dummy(adapter, sc_addr[1]);
680 if (!data->lm75[1]) {
697 dev_err(&client->dev, "subclient %d registration " 681 dev_err(&client->dev, "subclient %d registration "
698 "at address 0x%x failed.\n", i, data->lm75[1]->addr); 682 "at address 0x%x failed.\n", 2, sc_addr[1]);
683 err = -ENOMEM;
699 goto ERROR_SC_3; 684 goto ERROR_SC_3;
700 } 685 }
701 686
@@ -703,55 +688,31 @@ static int asb100_detect_subclients(struct i2c_adapter *adapter, int address,
703 688
704/* Undo inits in case of errors */ 689/* Undo inits in case of errors */
705ERROR_SC_3: 690ERROR_SC_3:
706 i2c_detach_client(data->lm75[0]); 691 i2c_unregister_device(data->lm75[0]);
707ERROR_SC_2: 692ERROR_SC_2:
708 kfree(data->lm75[1]);
709ERROR_SC_1:
710 kfree(data->lm75[0]);
711ERROR_SC_0:
712 return err; 693 return err;
713} 694}
714 695
715static int asb100_detect(struct i2c_adapter *adapter, int address, int kind) 696/* Return 0 if detection is successful, -ENODEV otherwise */
697static int asb100_detect(struct i2c_client *client, int kind,
698 struct i2c_board_info *info)
716{ 699{
717 int err; 700 struct i2c_adapter *adapter = client->adapter;
718 struct i2c_client *client;
719 struct asb100_data *data;
720 701
721 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 702 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
722 pr_debug("asb100.o: detect failed, " 703 pr_debug("asb100.o: detect failed, "
723 "smbus byte data not supported!\n"); 704 "smbus byte data not supported!\n");
724 err = -ENODEV; 705 return -ENODEV;
725 goto ERROR0;
726 } 706 }
727 707
728 /* OK. For now, we presume we have a valid client. We now create the
729 client structure, even though we cannot fill it completely yet.
730 But it allows us to access asb100_{read,write}_value. */
731
732 if (!(data = kzalloc(sizeof(struct asb100_data), GFP_KERNEL))) {
733 pr_debug("asb100.o: detect failed, kzalloc failed!\n");
734 err = -ENOMEM;
735 goto ERROR0;
736 }
737
738 client = &data->client;
739 mutex_init(&data->lock);
740 i2c_set_clientdata(client, data);
741 client->addr = address;
742 client->adapter = adapter;
743 client->driver = &asb100_driver;
744
745 /* Now, we do the remaining detection. */
746
747 /* The chip may be stuck in some other bank than bank 0. This may 708 /* The chip may be stuck in some other bank than bank 0. This may
748 make reading other information impossible. Specify a force=... or 709 make reading other information impossible. Specify a force=... or
749 force_*=... parameter, and the chip will be reset to the right 710 force_*=... parameter, and the chip will be reset to the right
750 bank. */ 711 bank. */
751 if (kind < 0) { 712 if (kind < 0) {
752 713
753 int val1 = asb100_read_value(client, ASB100_REG_BANK); 714 int val1 = i2c_smbus_read_byte_data(client, ASB100_REG_BANK);
754 int val2 = asb100_read_value(client, ASB100_REG_CHIPMAN); 715 int val2 = i2c_smbus_read_byte_data(client, ASB100_REG_CHIPMAN);
755 716
756 /* If we're in bank 0 */ 717 /* If we're in bank 0 */
757 if ((!(val1 & 0x07)) && 718 if ((!(val1 & 0x07)) &&
@@ -761,48 +722,60 @@ static int asb100_detect(struct i2c_adapter *adapter, int address, int kind)
761 ((val1 & 0x80) && (val2 != 0x06)))) { 722 ((val1 & 0x80) && (val2 != 0x06)))) {
762 pr_debug("asb100.o: detect failed, " 723 pr_debug("asb100.o: detect failed, "
763 "bad chip id 0x%02x!\n", val2); 724 "bad chip id 0x%02x!\n", val2);
764 err = -ENODEV; 725 return -ENODEV;
765 goto ERROR1;
766 } 726 }
767 727
768 } /* kind < 0 */ 728 } /* kind < 0 */
769 729
770 /* We have either had a force parameter, or we have already detected 730 /* We have either had a force parameter, or we have already detected
771 Winbond. Put it now into bank 0 and Vendor ID High Byte */ 731 Winbond. Put it now into bank 0 and Vendor ID High Byte */
772 asb100_write_value(client, ASB100_REG_BANK, 732 i2c_smbus_write_byte_data(client, ASB100_REG_BANK,
773 (asb100_read_value(client, ASB100_REG_BANK) & 0x78) | 0x80); 733 (i2c_smbus_read_byte_data(client, ASB100_REG_BANK) & 0x78)
734 | 0x80);
774 735
775 /* Determine the chip type. */ 736 /* Determine the chip type. */
776 if (kind <= 0) { 737 if (kind <= 0) {
777 int val1 = asb100_read_value(client, ASB100_REG_WCHIPID); 738 int val1 = i2c_smbus_read_byte_data(client, ASB100_REG_WCHIPID);
778 int val2 = asb100_read_value(client, ASB100_REG_CHIPMAN); 739 int val2 = i2c_smbus_read_byte_data(client, ASB100_REG_CHIPMAN);
779 740
780 if ((val1 == 0x31) && (val2 == 0x06)) 741 if ((val1 == 0x31) && (val2 == 0x06))
781 kind = asb100; 742 kind = asb100;
782 else { 743 else {
783 if (kind == 0) 744 if (kind == 0)
784 dev_warn(&client->dev, "ignoring " 745 dev_warn(&adapter->dev, "ignoring "
785 "'force' parameter for unknown chip " 746 "'force' parameter for unknown chip "
786 "at adapter %d, address 0x%02x.\n", 747 "at adapter %d, address 0x%02x.\n",
787 i2c_adapter_id(adapter), address); 748 i2c_adapter_id(adapter), client->addr);
788 err = -ENODEV; 749 return -ENODEV;
789 goto ERROR1;
790 } 750 }
791 } 751 }
792 752
793 /* Fill in remaining client fields and put it into the global list */ 753 strlcpy(info->type, "asb100", I2C_NAME_SIZE);
794 strlcpy(client->name, "asb100", I2C_NAME_SIZE);
795 data->type = kind;
796 mutex_init(&data->update_lock);
797 754
798 /* Tell the I2C layer a new client has arrived */ 755 return 0;
799 if ((err = i2c_attach_client(client))) 756}
800 goto ERROR1; 757
758static int asb100_probe(struct i2c_client *client,
759 const struct i2c_device_id *id)
760{
761 int err;
762 struct asb100_data *data;
763
764 data = kzalloc(sizeof(struct asb100_data), GFP_KERNEL);
765 if (!data) {
766 pr_debug("asb100.o: probe failed, kzalloc failed!\n");
767 err = -ENOMEM;
768 goto ERROR0;
769 }
770
771 i2c_set_clientdata(client, data);
772 mutex_init(&data->lock);
773 mutex_init(&data->update_lock);
801 774
802 /* Attach secondary lm75 clients */ 775 /* Attach secondary lm75 clients */
803 if ((err = asb100_detect_subclients(adapter, address, kind, 776 err = asb100_detect_subclients(client);
804 client))) 777 if (err)
805 goto ERROR2; 778 goto ERROR1;
806 779
807 /* Initialize the chip */ 780 /* Initialize the chip */
808 asb100_init_client(client); 781 asb100_init_client(client);
@@ -827,39 +800,25 @@ static int asb100_detect(struct i2c_adapter *adapter, int address, int kind)
827ERROR4: 800ERROR4:
828 sysfs_remove_group(&client->dev.kobj, &asb100_group); 801 sysfs_remove_group(&client->dev.kobj, &asb100_group);
829ERROR3: 802ERROR3:
830 i2c_detach_client(data->lm75[1]); 803 i2c_unregister_device(data->lm75[1]);
831 i2c_detach_client(data->lm75[0]); 804 i2c_unregister_device(data->lm75[0]);
832 kfree(data->lm75[1]);
833 kfree(data->lm75[0]);
834ERROR2:
835 i2c_detach_client(client);
836ERROR1: 805ERROR1:
837 kfree(data); 806 kfree(data);
838ERROR0: 807ERROR0:
839 return err; 808 return err;
840} 809}
841 810
842static int asb100_detach_client(struct i2c_client *client) 811static int asb100_remove(struct i2c_client *client)
843{ 812{
844 struct asb100_data *data = i2c_get_clientdata(client); 813 struct asb100_data *data = i2c_get_clientdata(client);
845 int err;
846
847 /* main client */
848 if (data) {
849 hwmon_device_unregister(data->hwmon_dev);
850 sysfs_remove_group(&client->dev.kobj, &asb100_group);
851 }
852 814
853 if ((err = i2c_detach_client(client))) 815 hwmon_device_unregister(data->hwmon_dev);
854 return err; 816 sysfs_remove_group(&client->dev.kobj, &asb100_group);
855 817
856 /* main client */ 818 i2c_unregister_device(data->lm75[1]);
857 if (data) 819 i2c_unregister_device(data->lm75[0]);
858 kfree(data);
859 820
860 /* subclient */ 821 kfree(data);
861 else
862 kfree(client);
863 822
864 return 0; 823 return 0;
865} 824}
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index 01c17e387f03..d191118ba0cb 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -46,21 +46,32 @@ static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END };
46 46
47I2C_CLIENT_INSMOD_1(atxp1); 47I2C_CLIENT_INSMOD_1(atxp1);
48 48
49static int atxp1_attach_adapter(struct i2c_adapter * adapter); 49static int atxp1_probe(struct i2c_client *client,
50static int atxp1_detach_client(struct i2c_client * client); 50 const struct i2c_device_id *id);
51static int atxp1_remove(struct i2c_client *client);
51static struct atxp1_data * atxp1_update_device(struct device *dev); 52static struct atxp1_data * atxp1_update_device(struct device *dev);
52static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind); 53static int atxp1_detect(struct i2c_client *client, int kind,
54 struct i2c_board_info *info);
55
56static const struct i2c_device_id atxp1_id[] = {
57 { "atxp1", atxp1 },
58 { }
59};
60MODULE_DEVICE_TABLE(i2c, atxp1_id);
53 61
54static struct i2c_driver atxp1_driver = { 62static struct i2c_driver atxp1_driver = {
63 .class = I2C_CLASS_HWMON,
55 .driver = { 64 .driver = {
56 .name = "atxp1", 65 .name = "atxp1",
57 }, 66 },
58 .attach_adapter = atxp1_attach_adapter, 67 .probe = atxp1_probe,
59 .detach_client = atxp1_detach_client, 68 .remove = atxp1_remove,
69 .id_table = atxp1_id,
70 .detect = atxp1_detect,
71 .address_data = &addr_data,
60}; 72};
61 73
62struct atxp1_data { 74struct atxp1_data {
63 struct i2c_client client;
64 struct device *hwmon_dev; 75 struct device *hwmon_dev;
65 struct mutex update_lock; 76 struct mutex update_lock;
66 unsigned long last_updated; 77 unsigned long last_updated;
@@ -263,35 +274,16 @@ static const struct attribute_group atxp1_group = {
263}; 274};
264 275
265 276
266static int atxp1_attach_adapter(struct i2c_adapter *adapter) 277/* Return 0 if detection is successful, -ENODEV otherwise */
278static int atxp1_detect(struct i2c_client *new_client, int kind,
279 struct i2c_board_info *info)
267{ 280{
268 if (!(adapter->class & I2C_CLASS_HWMON)) 281 struct i2c_adapter *adapter = new_client->adapter;
269 return 0;
270 return i2c_probe(adapter, &addr_data, &atxp1_detect);
271};
272 282
273static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind)
274{
275 struct i2c_client * new_client;
276 struct atxp1_data * data;
277 int err = 0;
278 u8 temp; 283 u8 temp;
279 284
280 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 285 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
281 goto exit; 286 return -ENODEV;
282
283 if (!(data = kzalloc(sizeof(struct atxp1_data), GFP_KERNEL))) {
284 err = -ENOMEM;
285 goto exit;
286 }
287
288 new_client = &data->client;
289 i2c_set_clientdata(new_client, data);
290
291 new_client->addr = address;
292 new_client->adapter = adapter;
293 new_client->driver = &atxp1_driver;
294 new_client->flags = 0;
295 287
296 /* Detect ATXP1, checking if vendor ID registers are all zero */ 288 /* Detect ATXP1, checking if vendor ID registers are all zero */
297 if (!((i2c_smbus_read_byte_data(new_client, 0x3e) == 0) && 289 if (!((i2c_smbus_read_byte_data(new_client, 0x3e) == 0) &&
@@ -305,35 +297,46 @@ static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind)
305 297
306 if (!((i2c_smbus_read_byte_data(new_client, 0x10) == temp) && 298 if (!((i2c_smbus_read_byte_data(new_client, 0x10) == temp) &&
307 (i2c_smbus_read_byte_data(new_client, 0x11) == temp) )) 299 (i2c_smbus_read_byte_data(new_client, 0x11) == temp) ))
308 goto exit_free; 300 return -ENODEV;
309 } 301 }
310 302
311 /* Get VRM */ 303 /* Get VRM */
312 data->vrm = vid_which_vrm(); 304 temp = vid_which_vrm();
313 305
314 if ((data->vrm != 90) && (data->vrm != 91)) { 306 if ((temp != 90) && (temp != 91)) {
315 dev_err(&new_client->dev, "Not supporting VRM %d.%d\n", 307 dev_err(&adapter->dev, "atxp1: Not supporting VRM %d.%d\n",
316 data->vrm / 10, data->vrm % 10); 308 temp / 10, temp % 10);
317 goto exit_free; 309 return -ENODEV;
318 } 310 }
319 311
320 strncpy(new_client->name, "atxp1", I2C_NAME_SIZE); 312 strlcpy(info->type, "atxp1", I2C_NAME_SIZE);
321
322 data->valid = 0;
323 313
324 mutex_init(&data->update_lock); 314 return 0;
315}
325 316
326 err = i2c_attach_client(new_client); 317static int atxp1_probe(struct i2c_client *new_client,
318 const struct i2c_device_id *id)
319{
320 struct atxp1_data *data;
321 int err;
327 322
328 if (err) 323 data = kzalloc(sizeof(struct atxp1_data), GFP_KERNEL);
329 { 324 if (!data) {
330 dev_err(&new_client->dev, "Attach client error.\n"); 325 err = -ENOMEM;
331 goto exit_free; 326 goto exit;
332 } 327 }
333 328
329 /* Get VRM */
330 data->vrm = vid_which_vrm();
331
332 i2c_set_clientdata(new_client, data);
333 data->valid = 0;
334
335 mutex_init(&data->update_lock);
336
334 /* Register sysfs hooks */ 337 /* Register sysfs hooks */
335 if ((err = sysfs_create_group(&new_client->dev.kobj, &atxp1_group))) 338 if ((err = sysfs_create_group(&new_client->dev.kobj, &atxp1_group)))
336 goto exit_detach; 339 goto exit_free;
337 340
338 data->hwmon_dev = hwmon_device_register(&new_client->dev); 341 data->hwmon_dev = hwmon_device_register(&new_client->dev);
339 if (IS_ERR(data->hwmon_dev)) { 342 if (IS_ERR(data->hwmon_dev)) {
@@ -348,30 +351,22 @@ static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind)
348 351
349exit_remove_files: 352exit_remove_files:
350 sysfs_remove_group(&new_client->dev.kobj, &atxp1_group); 353 sysfs_remove_group(&new_client->dev.kobj, &atxp1_group);
351exit_detach:
352 i2c_detach_client(new_client);
353exit_free: 354exit_free:
354 kfree(data); 355 kfree(data);
355exit: 356exit:
356 return err; 357 return err;
357}; 358};
358 359
359static int atxp1_detach_client(struct i2c_client * client) 360static int atxp1_remove(struct i2c_client *client)
360{ 361{
361 struct atxp1_data * data = i2c_get_clientdata(client); 362 struct atxp1_data * data = i2c_get_clientdata(client);
362 int err;
363 363
364 hwmon_device_unregister(data->hwmon_dev); 364 hwmon_device_unregister(data->hwmon_dev);
365 sysfs_remove_group(&client->dev.kobj, &atxp1_group); 365 sysfs_remove_group(&client->dev.kobj, &atxp1_group);
366 366
367 err = i2c_detach_client(client); 367 kfree(data);
368
369 if (err)
370 dev_err(&client->dev, "Failed to detach client.\n");
371 else
372 kfree(data);
373 368
374 return err; 369 return 0;
375}; 370};
376 371
377static int __init atxp1_init(void) 372static int __init atxp1_init(void)
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index 5f300ffed657..7415381601c3 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -72,7 +72,6 @@ static const u8 DS1621_REG_TEMP[3] = {
72 72
73/* Each client has this additional data */ 73/* Each client has this additional data */
74struct ds1621_data { 74struct ds1621_data {
75 struct i2c_client client;
76 struct device *hwmon_dev; 75 struct device *hwmon_dev;
77 struct mutex update_lock; 76 struct mutex update_lock;
78 char valid; /* !=0 if following fields are valid */ 77 char valid; /* !=0 if following fields are valid */
@@ -82,20 +81,32 @@ struct ds1621_data {
82 u8 conf; /* Register encoding, combined */ 81 u8 conf; /* Register encoding, combined */
83}; 82};
84 83
85static int ds1621_attach_adapter(struct i2c_adapter *adapter); 84static int ds1621_probe(struct i2c_client *client,
86static int ds1621_detect(struct i2c_adapter *adapter, int address, 85 const struct i2c_device_id *id);
87 int kind); 86static int ds1621_detect(struct i2c_client *client, int kind,
87 struct i2c_board_info *info);
88static void ds1621_init_client(struct i2c_client *client); 88static void ds1621_init_client(struct i2c_client *client);
89static int ds1621_detach_client(struct i2c_client *client); 89static int ds1621_remove(struct i2c_client *client);
90static struct ds1621_data *ds1621_update_client(struct device *dev); 90static struct ds1621_data *ds1621_update_client(struct device *dev);
91 91
92static const struct i2c_device_id ds1621_id[] = {
93 { "ds1621", ds1621 },
94 { "ds1625", ds1621 },
95 { }
96};
97MODULE_DEVICE_TABLE(i2c, ds1621_id);
98
92/* This is the driver that will be inserted */ 99/* This is the driver that will be inserted */
93static struct i2c_driver ds1621_driver = { 100static struct i2c_driver ds1621_driver = {
101 .class = I2C_CLASS_HWMON,
94 .driver = { 102 .driver = {
95 .name = "ds1621", 103 .name = "ds1621",
96 }, 104 },
97 .attach_adapter = ds1621_attach_adapter, 105 .probe = ds1621_probe,
98 .detach_client = ds1621_detach_client, 106 .remove = ds1621_remove,
107 .id_table = ds1621_id,
108 .detect = ds1621_detect,
109 .address_data = &addr_data,
99}; 110};
100 111
101/* All registers are word-sized, except for the configuration register. 112/* All registers are word-sized, except for the configuration register.
@@ -199,40 +210,18 @@ static const struct attribute_group ds1621_group = {
199}; 210};
200 211
201 212
202static int ds1621_attach_adapter(struct i2c_adapter *adapter) 213/* Return 0 if detection is successful, -ENODEV otherwise */
203{ 214static int ds1621_detect(struct i2c_client *client, int kind,
204 if (!(adapter->class & I2C_CLASS_HWMON)) 215 struct i2c_board_info *info)
205 return 0;
206 return i2c_probe(adapter, &addr_data, ds1621_detect);
207}
208
209/* This function is called by i2c_probe */
210static int ds1621_detect(struct i2c_adapter *adapter, int address,
211 int kind)
212{ 216{
217 struct i2c_adapter *adapter = client->adapter;
213 int conf, temp; 218 int conf, temp;
214 struct i2c_client *client; 219 int i;
215 struct ds1621_data *data;
216 int i, err = 0;
217 220
218 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA 221 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
219 | I2C_FUNC_SMBUS_WORD_DATA 222 | I2C_FUNC_SMBUS_WORD_DATA
220 | I2C_FUNC_SMBUS_WRITE_BYTE)) 223 | I2C_FUNC_SMBUS_WRITE_BYTE))
221 goto exit; 224 return -ENODEV;
222
223 /* OK. For now, we presume we have a valid client. We now create the
224 client structure, even though we cannot fill it completely yet.
225 But it allows us to access ds1621_{read,write}_value. */
226 if (!(data = kzalloc(sizeof(struct ds1621_data), GFP_KERNEL))) {
227 err = -ENOMEM;
228 goto exit;
229 }
230
231 client = &data->client;
232 i2c_set_clientdata(client, data);
233 client->addr = address;
234 client->adapter = adapter;
235 client->driver = &ds1621_driver;
236 225
237 /* Now, we do the remaining detection. It is lousy. */ 226 /* Now, we do the remaining detection. It is lousy. */
238 if (kind < 0) { 227 if (kind < 0) {
@@ -241,29 +230,41 @@ static int ds1621_detect(struct i2c_adapter *adapter, int address,
241 improbable in our case. */ 230 improbable in our case. */
242 conf = ds1621_read_value(client, DS1621_REG_CONF); 231 conf = ds1621_read_value(client, DS1621_REG_CONF);
243 if (conf & DS1621_REG_CONFIG_NVB) 232 if (conf & DS1621_REG_CONFIG_NVB)
244 goto exit_free; 233 return -ENODEV;
245 /* The 7 lowest bits of a temperature should always be 0. */ 234 /* The 7 lowest bits of a temperature should always be 0. */
246 for (i = 0; i < ARRAY_SIZE(data->temp); i++) { 235 for (i = 0; i < ARRAY_SIZE(DS1621_REG_TEMP); i++) {
247 temp = ds1621_read_value(client, DS1621_REG_TEMP[i]); 236 temp = ds1621_read_value(client, DS1621_REG_TEMP[i]);
248 if (temp & 0x007f) 237 if (temp & 0x007f)
249 goto exit_free; 238 return -ENODEV;
250 } 239 }
251 } 240 }
252 241
253 /* Fill in remaining client fields and put it into the global list */ 242 strlcpy(info->type, "ds1621", I2C_NAME_SIZE);
254 strlcpy(client->name, "ds1621", I2C_NAME_SIZE);
255 mutex_init(&data->update_lock);
256 243
257 /* Tell the I2C layer a new client has arrived */ 244 return 0;
258 if ((err = i2c_attach_client(client))) 245}
259 goto exit_free; 246
247static int ds1621_probe(struct i2c_client *client,
248 const struct i2c_device_id *id)
249{
250 struct ds1621_data *data;
251 int err;
252
253 data = kzalloc(sizeof(struct ds1621_data), GFP_KERNEL);
254 if (!data) {
255 err = -ENOMEM;
256 goto exit;
257 }
258
259 i2c_set_clientdata(client, data);
260 mutex_init(&data->update_lock);
260 261
261 /* Initialize the DS1621 chip */ 262 /* Initialize the DS1621 chip */
262 ds1621_init_client(client); 263 ds1621_init_client(client);
263 264
264 /* Register sysfs hooks */ 265 /* Register sysfs hooks */
265 if ((err = sysfs_create_group(&client->dev.kobj, &ds1621_group))) 266 if ((err = sysfs_create_group(&client->dev.kobj, &ds1621_group)))
266 goto exit_detach; 267 goto exit_free;
267 268
268 data->hwmon_dev = hwmon_device_register(&client->dev); 269 data->hwmon_dev = hwmon_device_register(&client->dev);
269 if (IS_ERR(data->hwmon_dev)) { 270 if (IS_ERR(data->hwmon_dev)) {
@@ -275,25 +276,19 @@ static int ds1621_detect(struct i2c_adapter *adapter, int address,
275 276
276 exit_remove_files: 277 exit_remove_files:
277 sysfs_remove_group(&client->dev.kobj, &ds1621_group); 278 sysfs_remove_group(&client->dev.kobj, &ds1621_group);
278 exit_detach:
279 i2c_detach_client(client);
280 exit_free: 279 exit_free:
281 kfree(data); 280 kfree(data);
282 exit: 281 exit:
283 return err; 282 return err;
284} 283}
285 284
286static int ds1621_detach_client(struct i2c_client *client) 285static int ds1621_remove(struct i2c_client *client)
287{ 286{
288 struct ds1621_data *data = i2c_get_clientdata(client); 287 struct ds1621_data *data = i2c_get_clientdata(client);
289 int err;
290 288
291 hwmon_device_unregister(data->hwmon_dev); 289 hwmon_device_unregister(data->hwmon_dev);
292 sysfs_remove_group(&client->dev.kobj, &ds1621_group); 290 sysfs_remove_group(&client->dev.kobj, &ds1621_group);
293 291
294 if ((err = i2c_detach_client(client)))
295 return err;
296
297 kfree(data); 292 kfree(data);
298 293
299 return 0; 294 return 0;
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index dc1f30e432ea..1692de369969 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -87,7 +87,6 @@ I2C_CLIENT_INSMOD_2(f75373, f75375);
87 87
88struct f75375_data { 88struct f75375_data {
89 unsigned short addr; 89 unsigned short addr;
90 struct i2c_client *client;
91 struct device *hwmon_dev; 90 struct device *hwmon_dev;
92 91
93 const char *name; 92 const char *name;
@@ -114,21 +113,12 @@ struct f75375_data {
114 s8 temp_max_hyst[2]; 113 s8 temp_max_hyst[2];
115}; 114};
116 115
117static int f75375_attach_adapter(struct i2c_adapter *adapter); 116static int f75375_detect(struct i2c_client *client, int kind,
118static int f75375_detect(struct i2c_adapter *adapter, int address, int kind); 117 struct i2c_board_info *info);
119static int f75375_detach_client(struct i2c_client *client);
120static int f75375_probe(struct i2c_client *client, 118static int f75375_probe(struct i2c_client *client,
121 const struct i2c_device_id *id); 119 const struct i2c_device_id *id);
122static int f75375_remove(struct i2c_client *client); 120static int f75375_remove(struct i2c_client *client);
123 121
124static struct i2c_driver f75375_legacy_driver = {
125 .driver = {
126 .name = "f75375_legacy",
127 },
128 .attach_adapter = f75375_attach_adapter,
129 .detach_client = f75375_detach_client,
130};
131
132static const struct i2c_device_id f75375_id[] = { 122static const struct i2c_device_id f75375_id[] = {
133 { "f75373", f75373 }, 123 { "f75373", f75373 },
134 { "f75375", f75375 }, 124 { "f75375", f75375 },
@@ -137,12 +127,15 @@ static const struct i2c_device_id f75375_id[] = {
137MODULE_DEVICE_TABLE(i2c, f75375_id); 127MODULE_DEVICE_TABLE(i2c, f75375_id);
138 128
139static struct i2c_driver f75375_driver = { 129static struct i2c_driver f75375_driver = {
130 .class = I2C_CLASS_HWMON,
140 .driver = { 131 .driver = {
141 .name = "f75375", 132 .name = "f75375",
142 }, 133 },
143 .probe = f75375_probe, 134 .probe = f75375_probe,
144 .remove = f75375_remove, 135 .remove = f75375_remove,
145 .id_table = f75375_id, 136 .id_table = f75375_id,
137 .detect = f75375_detect,
138 .address_data = &addr_data,
146}; 139};
147 140
148static inline int f75375_read8(struct i2c_client *client, u8 reg) 141static inline int f75375_read8(struct i2c_client *client, u8 reg)
@@ -607,22 +600,6 @@ static const struct attribute_group f75375_group = {
607 .attrs = f75375_attributes, 600 .attrs = f75375_attributes,
608}; 601};
609 602
610static int f75375_detach_client(struct i2c_client *client)
611{
612 int err;
613
614 f75375_remove(client);
615 err = i2c_detach_client(client);
616 if (err) {
617 dev_err(&client->dev,
618 "Client deregistration failed, "
619 "client not detached.\n");
620 return err;
621 }
622 kfree(client);
623 return 0;
624}
625
626static void f75375_init(struct i2c_client *client, struct f75375_data *data, 603static void f75375_init(struct i2c_client *client, struct f75375_data *data,
627 struct f75375s_platform_data *f75375s_pdata) 604 struct f75375s_platform_data *f75375s_pdata)
628{ 605{
@@ -651,7 +628,6 @@ static int f75375_probe(struct i2c_client *client,
651 return -ENOMEM; 628 return -ENOMEM;
652 629
653 i2c_set_clientdata(client, data); 630 i2c_set_clientdata(client, data);
654 data->client = client;
655 mutex_init(&data->update_lock); 631 mutex_init(&data->update_lock);
656 data->kind = id->driver_data; 632 data->kind = id->driver_data;
657 633
@@ -700,29 +676,13 @@ static int f75375_remove(struct i2c_client *client)
700 return 0; 676 return 0;
701} 677}
702 678
703static int f75375_attach_adapter(struct i2c_adapter *adapter) 679/* Return 0 if detection is successful, -ENODEV otherwise */
704{ 680static int f75375_detect(struct i2c_client *client, int kind,
705 if (!(adapter->class & I2C_CLASS_HWMON)) 681 struct i2c_board_info *info)
706 return 0;
707 return i2c_probe(adapter, &addr_data, f75375_detect);
708}
709
710/* This function is called by i2c_probe */
711static int f75375_detect(struct i2c_adapter *adapter, int address, int kind)
712{ 682{
713 struct i2c_client *client; 683 struct i2c_adapter *adapter = client->adapter;
714 u8 version = 0; 684 u8 version = 0;
715 int err = 0;
716 const char *name = ""; 685 const char *name = "";
717 struct i2c_device_id id;
718
719 if (!(client = kzalloc(sizeof(*client), GFP_KERNEL))) {
720 err = -ENOMEM;
721 goto exit;
722 }
723 client->addr = address;
724 client->adapter = adapter;
725 client->driver = &f75375_legacy_driver;
726 686
727 if (kind < 0) { 687 if (kind < 0) {
728 u16 vendid = f75375_read16(client, F75375_REG_VENDOR); 688 u16 vendid = f75375_read16(client, F75375_REG_VENDOR);
@@ -736,7 +696,7 @@ static int f75375_detect(struct i2c_adapter *adapter, int address, int kind)
736 dev_err(&adapter->dev, 696 dev_err(&adapter->dev,
737 "failed,%02X,%02X,%02X\n", 697 "failed,%02X,%02X,%02X\n",
738 chipid, version, vendid); 698 chipid, version, vendid);
739 goto exit_free; 699 return -ENODEV;
740 } 700 }
741 } 701 }
742 702
@@ -746,43 +706,18 @@ static int f75375_detect(struct i2c_adapter *adapter, int address, int kind)
746 name = "f75373"; 706 name = "f75373";
747 } 707 }
748 dev_info(&adapter->dev, "found %s version: %02X\n", name, version); 708 dev_info(&adapter->dev, "found %s version: %02X\n", name, version);
749 strlcpy(client->name, name, I2C_NAME_SIZE); 709 strlcpy(info->type, name, I2C_NAME_SIZE);
750
751 if ((err = i2c_attach_client(client)))
752 goto exit_free;
753
754 strlcpy(id.name, name, I2C_NAME_SIZE);
755 id.driver_data = kind;
756 if ((err = f75375_probe(client, &id)) < 0)
757 goto exit_detach;
758 710
759 return 0; 711 return 0;
760
761exit_detach:
762 i2c_detach_client(client);
763exit_free:
764 kfree(client);
765exit:
766 return err;
767} 712}
768 713
769static int __init sensors_f75375_init(void) 714static int __init sensors_f75375_init(void)
770{ 715{
771 int status; 716 return i2c_add_driver(&f75375_driver);
772 status = i2c_add_driver(&f75375_driver);
773 if (status)
774 return status;
775
776 status = i2c_add_driver(&f75375_legacy_driver);
777 if (status)
778 i2c_del_driver(&f75375_driver);
779
780 return status;
781} 717}
782 718
783static void __exit sensors_f75375_exit(void) 719static void __exit sensors_f75375_exit(void)
784{ 720{
785 i2c_del_driver(&f75375_legacy_driver);
786 i2c_del_driver(&f75375_driver); 721 i2c_del_driver(&f75375_driver);
787} 722}
788 723
diff --git a/drivers/hwmon/fscher.c b/drivers/hwmon/fscher.c
index ed26b66e0831..12c70e402cb2 100644
--- a/drivers/hwmon/fscher.c
+++ b/drivers/hwmon/fscher.c
@@ -106,9 +106,11 @@ I2C_CLIENT_INSMOD_1(fscher);
106 * Functions declaration 106 * Functions declaration
107 */ 107 */
108 108
109static int fscher_attach_adapter(struct i2c_adapter *adapter); 109static int fscher_probe(struct i2c_client *client,
110static int fscher_detect(struct i2c_adapter *adapter, int address, int kind); 110 const struct i2c_device_id *id);
111static int fscher_detach_client(struct i2c_client *client); 111static int fscher_detect(struct i2c_client *client, int kind,
112 struct i2c_board_info *info);
113static int fscher_remove(struct i2c_client *client);
112static struct fscher_data *fscher_update_device(struct device *dev); 114static struct fscher_data *fscher_update_device(struct device *dev);
113static void fscher_init_client(struct i2c_client *client); 115static void fscher_init_client(struct i2c_client *client);
114 116
@@ -119,12 +121,21 @@ static int fscher_write_value(struct i2c_client *client, u8 reg, u8 value);
119 * Driver data (common to all clients) 121 * Driver data (common to all clients)
120 */ 122 */
121 123
124static const struct i2c_device_id fscher_id[] = {
125 { "fscher", fscher },
126 { }
127};
128
122static struct i2c_driver fscher_driver = { 129static struct i2c_driver fscher_driver = {
130 .class = I2C_CLASS_HWMON,
123 .driver = { 131 .driver = {
124 .name = "fscher", 132 .name = "fscher",
125 }, 133 },
126 .attach_adapter = fscher_attach_adapter, 134 .probe = fscher_probe,
127 .detach_client = fscher_detach_client, 135 .remove = fscher_remove,
136 .id_table = fscher_id,
137 .detect = fscher_detect,
138 .address_data = &addr_data,
128}; 139};
129 140
130/* 141/*
@@ -132,7 +143,6 @@ static struct i2c_driver fscher_driver = {
132 */ 143 */
133 144
134struct fscher_data { 145struct fscher_data {
135 struct i2c_client client;
136 struct device *hwmon_dev; 146 struct device *hwmon_dev;
137 struct mutex update_lock; 147 struct mutex update_lock;
138 char valid; /* zero until following fields are valid */ 148 char valid; /* zero until following fields are valid */
@@ -283,38 +293,14 @@ static const struct attribute_group fscher_group = {
283 * Real code 293 * Real code
284 */ 294 */
285 295
286static int fscher_attach_adapter(struct i2c_adapter *adapter) 296/* Return 0 if detection is successful, -ENODEV otherwise */
287{ 297static int fscher_detect(struct i2c_client *new_client, int kind,
288 if (!(adapter->class & I2C_CLASS_HWMON)) 298 struct i2c_board_info *info)
289 return 0;
290 return i2c_probe(adapter, &addr_data, fscher_detect);
291}
292
293static int fscher_detect(struct i2c_adapter *adapter, int address, int kind)
294{ 299{
295 struct i2c_client *new_client; 300 struct i2c_adapter *adapter = new_client->adapter;
296 struct fscher_data *data;
297 int err = 0;
298 301
299 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 302 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
300 goto exit; 303 return -ENODEV;
301
302 /* OK. For now, we presume we have a valid client. We now create the
303 * client structure, even though we cannot fill it completely yet.
304 * But it allows us to access i2c_smbus_read_byte_data. */
305 if (!(data = kzalloc(sizeof(struct fscher_data), GFP_KERNEL))) {
306 err = -ENOMEM;
307 goto exit;
308 }
309
310 /* The common I2C client data is placed right before the
311 * Hermes-specific data. */
312 new_client = &data->client;
313 i2c_set_clientdata(new_client, data);
314 new_client->addr = address;
315 new_client->adapter = adapter;
316 new_client->driver = &fscher_driver;
317 new_client->flags = 0;
318 304
319 /* Do the remaining detection unless force or force_fscher parameter */ 305 /* Do the remaining detection unless force or force_fscher parameter */
320 if (kind < 0) { 306 if (kind < 0) {
@@ -324,24 +310,35 @@ static int fscher_detect(struct i2c_adapter *adapter, int address, int kind)
324 FSCHER_REG_IDENT_1) != 0x45) /* 'E' */ 310 FSCHER_REG_IDENT_1) != 0x45) /* 'E' */
325 || (i2c_smbus_read_byte_data(new_client, 311 || (i2c_smbus_read_byte_data(new_client,
326 FSCHER_REG_IDENT_2) != 0x52)) /* 'R' */ 312 FSCHER_REG_IDENT_2) != 0x52)) /* 'R' */
327 goto exit_free; 313 return -ENODEV;
314 }
315
316 strlcpy(info->type, "fscher", I2C_NAME_SIZE);
317
318 return 0;
319}
320
321static int fscher_probe(struct i2c_client *new_client,
322 const struct i2c_device_id *id)
323{
324 struct fscher_data *data;
325 int err;
326
327 data = kzalloc(sizeof(struct fscher_data), GFP_KERNEL);
328 if (!data) {
329 err = -ENOMEM;
330 goto exit;
328 } 331 }
329 332
330 /* Fill in the remaining client fields and put it into the 333 i2c_set_clientdata(new_client, data);
331 * global list */
332 strlcpy(new_client->name, "fscher", I2C_NAME_SIZE);
333 data->valid = 0; 334 data->valid = 0;
334 mutex_init(&data->update_lock); 335 mutex_init(&data->update_lock);
335 336
336 /* Tell the I2C layer a new client has arrived */
337 if ((err = i2c_attach_client(new_client)))
338 goto exit_free;
339
340 fscher_init_client(new_client); 337 fscher_init_client(new_client);
341 338
342 /* Register sysfs hooks */ 339 /* Register sysfs hooks */
343 if ((err = sysfs_create_group(&new_client->dev.kobj, &fscher_group))) 340 if ((err = sysfs_create_group(&new_client->dev.kobj, &fscher_group)))
344 goto exit_detach; 341 goto exit_free;
345 342
346 data->hwmon_dev = hwmon_device_register(&new_client->dev); 343 data->hwmon_dev = hwmon_device_register(&new_client->dev);
347 if (IS_ERR(data->hwmon_dev)) { 344 if (IS_ERR(data->hwmon_dev)) {
@@ -353,25 +350,19 @@ static int fscher_detect(struct i2c_adapter *adapter, int address, int kind)
353 350
354exit_remove_files: 351exit_remove_files:
355 sysfs_remove_group(&new_client->dev.kobj, &fscher_group); 352 sysfs_remove_group(&new_client->dev.kobj, &fscher_group);
356exit_detach:
357 i2c_detach_client(new_client);
358exit_free: 353exit_free:
359 kfree(data); 354 kfree(data);
360exit: 355exit:
361 return err; 356 return err;
362} 357}
363 358
364static int fscher_detach_client(struct i2c_client *client) 359static int fscher_remove(struct i2c_client *client)
365{ 360{
366 struct fscher_data *data = i2c_get_clientdata(client); 361 struct fscher_data *data = i2c_get_clientdata(client);
367 int err;
368 362
369 hwmon_device_unregister(data->hwmon_dev); 363 hwmon_device_unregister(data->hwmon_dev);
370 sysfs_remove_group(&client->dev.kobj, &fscher_group); 364 sysfs_remove_group(&client->dev.kobj, &fscher_group);
371 365
372 if ((err = i2c_detach_client(client)))
373 return err;
374
375 kfree(data); 366 kfree(data);
376 return 0; 367 return 0;
377} 368}
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index bd89d270a5ed..967170368933 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -171,20 +171,37 @@ static const int FSCHMD_NO_TEMP_SENSORS[5] = { 3, 3, 4, 3, 5 };
171 * Functions declarations 171 * Functions declarations
172 */ 172 */
173 173
174static int fschmd_attach_adapter(struct i2c_adapter *adapter); 174static int fschmd_probe(struct i2c_client *client,
175static int fschmd_detach_client(struct i2c_client *client); 175 const struct i2c_device_id *id);
176static int fschmd_detect(struct i2c_client *client, int kind,
177 struct i2c_board_info *info);
178static int fschmd_remove(struct i2c_client *client);
176static struct fschmd_data *fschmd_update_device(struct device *dev); 179static struct fschmd_data *fschmd_update_device(struct device *dev);
177 180
178/* 181/*
179 * Driver data (common to all clients) 182 * Driver data (common to all clients)
180 */ 183 */
181 184
185static const struct i2c_device_id fschmd_id[] = {
186 { "fscpos", fscpos },
187 { "fscher", fscher },
188 { "fscscy", fscscy },
189 { "fschrc", fschrc },
190 { "fschmd", fschmd },
191 { }
192};
193MODULE_DEVICE_TABLE(i2c, fschmd_id);
194
182static struct i2c_driver fschmd_driver = { 195static struct i2c_driver fschmd_driver = {
196 .class = I2C_CLASS_HWMON,
183 .driver = { 197 .driver = {
184 .name = FSCHMD_NAME, 198 .name = FSCHMD_NAME,
185 }, 199 },
186 .attach_adapter = fschmd_attach_adapter, 200 .probe = fschmd_probe,
187 .detach_client = fschmd_detach_client, 201 .remove = fschmd_remove,
202 .id_table = fschmd_id,
203 .detect = fschmd_detect,
204 .address_data = &addr_data,
188}; 205};
189 206
190/* 207/*
@@ -192,7 +209,6 @@ static struct i2c_driver fschmd_driver = {
192 */ 209 */
193 210
194struct fschmd_data { 211struct fschmd_data {
195 struct i2c_client client;
196 struct device *hwmon_dev; 212 struct device *hwmon_dev;
197 struct mutex update_lock; 213 struct mutex update_lock;
198 int kind; 214 int kind;
@@ -269,7 +285,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
269 v = SENSORS_LIMIT(v, -128, 127) + 128; 285 v = SENSORS_LIMIT(v, -128, 127) + 128;
270 286
271 mutex_lock(&data->update_lock); 287 mutex_lock(&data->update_lock);
272 i2c_smbus_write_byte_data(&data->client, 288 i2c_smbus_write_byte_data(to_i2c_client(dev),
273 FSCHMD_REG_TEMP_LIMIT[data->kind][index], v); 289 FSCHMD_REG_TEMP_LIMIT[data->kind][index], v);
274 data->temp_max[index] = v; 290 data->temp_max[index] = v;
275 mutex_unlock(&data->update_lock); 291 mutex_unlock(&data->update_lock);
@@ -346,14 +362,14 @@ static ssize_t store_fan_div(struct device *dev, struct device_attribute
346 362
347 mutex_lock(&data->update_lock); 363 mutex_lock(&data->update_lock);
348 364
349 reg = i2c_smbus_read_byte_data(&data->client, 365 reg = i2c_smbus_read_byte_data(to_i2c_client(dev),
350 FSCHMD_REG_FAN_RIPPLE[data->kind][index]); 366 FSCHMD_REG_FAN_RIPPLE[data->kind][index]);
351 367
352 /* bits 2..7 reserved => mask with 0x03 */ 368 /* bits 2..7 reserved => mask with 0x03 */
353 reg &= ~0x03; 369 reg &= ~0x03;
354 reg |= v; 370 reg |= v;
355 371
356 i2c_smbus_write_byte_data(&data->client, 372 i2c_smbus_write_byte_data(to_i2c_client(dev),
357 FSCHMD_REG_FAN_RIPPLE[data->kind][index], reg); 373 FSCHMD_REG_FAN_RIPPLE[data->kind][index], reg);
358 374
359 data->fan_ripple[index] = reg; 375 data->fan_ripple[index] = reg;
@@ -416,7 +432,7 @@ static ssize_t store_pwm_auto_point1_pwm(struct device *dev,
416 432
417 mutex_lock(&data->update_lock); 433 mutex_lock(&data->update_lock);
418 434
419 i2c_smbus_write_byte_data(&data->client, 435 i2c_smbus_write_byte_data(to_i2c_client(dev),
420 FSCHMD_REG_FAN_MIN[data->kind][index], v); 436 FSCHMD_REG_FAN_MIN[data->kind][index], v);
421 data->fan_min[index] = v; 437 data->fan_min[index] = v;
422 438
@@ -448,14 +464,14 @@ static ssize_t store_alert_led(struct device *dev,
448 464
449 mutex_lock(&data->update_lock); 465 mutex_lock(&data->update_lock);
450 466
451 reg = i2c_smbus_read_byte_data(&data->client, FSCHMD_REG_CONTROL); 467 reg = i2c_smbus_read_byte_data(to_i2c_client(dev), FSCHMD_REG_CONTROL);
452 468
453 if (v) 469 if (v)
454 reg |= FSCHMD_CONTROL_ALERT_LED_MASK; 470 reg |= FSCHMD_CONTROL_ALERT_LED_MASK;
455 else 471 else
456 reg &= ~FSCHMD_CONTROL_ALERT_LED_MASK; 472 reg &= ~FSCHMD_CONTROL_ALERT_LED_MASK;
457 473
458 i2c_smbus_write_byte_data(&data->client, FSCHMD_REG_CONTROL, reg); 474 i2c_smbus_write_byte_data(to_i2c_client(dev), FSCHMD_REG_CONTROL, reg);
459 475
460 data->global_control = reg; 476 data->global_control = reg;
461 477
@@ -600,32 +616,15 @@ static void fschmd_dmi_decode(const struct dmi_header *header)
600 } 616 }
601} 617}
602 618
603static int fschmd_detect(struct i2c_adapter *adapter, int address, int kind) 619static int fschmd_detect(struct i2c_client *client, int kind,
620 struct i2c_board_info *info)
604{ 621{
605 struct i2c_client *client; 622 struct i2c_adapter *adapter = client->adapter;
606 struct fschmd_data *data;
607 u8 revision;
608 const char * const names[5] = { "Poseidon", "Hermes", "Scylla",
609 "Heracles", "Heimdall" };
610 const char * const client_names[5] = { "fscpos", "fscher", "fscscy", 623 const char * const client_names[5] = { "fscpos", "fscher", "fscscy",
611 "fschrc", "fschmd" }; 624 "fschrc", "fschmd" };
612 int i, err = 0;
613 625
614 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 626 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
615 return 0; 627 return -ENODEV;
616
617 /* OK. For now, we presume we have a valid client. We now create the
618 * client structure, even though we cannot fill it completely yet.
619 * But it allows us to access i2c_smbus_read_byte_data. */
620 if (!(data = kzalloc(sizeof(struct fschmd_data), GFP_KERNEL)))
621 return -ENOMEM;
622
623 client = &data->client;
624 i2c_set_clientdata(client, data);
625 client->addr = address;
626 client->adapter = adapter;
627 client->driver = &fschmd_driver;
628 mutex_init(&data->update_lock);
629 628
630 /* Detect & Identify the chip */ 629 /* Detect & Identify the chip */
631 if (kind <= 0) { 630 if (kind <= 0) {
@@ -650,9 +649,31 @@ static int fschmd_detect(struct i2c_adapter *adapter, int address, int kind)
650 else if (!strcmp(id, "HMD")) 649 else if (!strcmp(id, "HMD"))
651 kind = fschmd; 650 kind = fschmd;
652 else 651 else
653 goto exit_free; 652 return -ENODEV;
654 } 653 }
655 654
655 strlcpy(info->type, client_names[kind - 1], I2C_NAME_SIZE);
656
657 return 0;
658}
659
660static int fschmd_probe(struct i2c_client *client,
661 const struct i2c_device_id *id)
662{
663 struct fschmd_data *data;
664 u8 revision;
665 const char * const names[5] = { "Poseidon", "Hermes", "Scylla",
666 "Heracles", "Heimdall" };
667 int i, err;
668 enum chips kind = id->driver_data;
669
670 data = kzalloc(sizeof(struct fschmd_data), GFP_KERNEL);
671 if (!data)
672 return -ENOMEM;
673
674 i2c_set_clientdata(client, data);
675 mutex_init(&data->update_lock);
676
656 if (kind == fscpos) { 677 if (kind == fscpos) {
657 /* The Poseidon has hardwired temp limits, fill these 678 /* The Poseidon has hardwired temp limits, fill these
658 in for the alarm resetting code */ 679 in for the alarm resetting code */
@@ -674,11 +695,6 @@ static int fschmd_detect(struct i2c_adapter *adapter, int address, int kind)
674 695
675 /* i2c kind goes from 1-5, we want from 0-4 to address arrays */ 696 /* i2c kind goes from 1-5, we want from 0-4 to address arrays */
676 data->kind = kind - 1; 697 data->kind = kind - 1;
677 strlcpy(client->name, client_names[data->kind], I2C_NAME_SIZE);
678
679 /* Tell the I2C layer a new client has arrived */
680 if ((err = i2c_attach_client(client)))
681 goto exit_free;
682 698
683 for (i = 0; i < ARRAY_SIZE(fschmd_attr); i++) { 699 for (i = 0; i < ARRAY_SIZE(fschmd_attr); i++) {
684 err = device_create_file(&client->dev, 700 err = device_create_file(&client->dev,
@@ -726,25 +742,14 @@ static int fschmd_detect(struct i2c_adapter *adapter, int address, int kind)
726 return 0; 742 return 0;
727 743
728exit_detach: 744exit_detach:
729 fschmd_detach_client(client); /* will also free data for us */ 745 fschmd_remove(client); /* will also free data for us */
730 return err;
731
732exit_free:
733 kfree(data);
734 return err; 746 return err;
735} 747}
736 748
737static int fschmd_attach_adapter(struct i2c_adapter *adapter) 749static int fschmd_remove(struct i2c_client *client)
738{
739 if (!(adapter->class & I2C_CLASS_HWMON))
740 return 0;
741 return i2c_probe(adapter, &addr_data, fschmd_detect);
742}
743
744static int fschmd_detach_client(struct i2c_client *client)
745{ 750{
746 struct fschmd_data *data = i2c_get_clientdata(client); 751 struct fschmd_data *data = i2c_get_clientdata(client);
747 int i, err; 752 int i;
748 753
749 /* Check if registered in case we're called from fschmd_detect 754 /* Check if registered in case we're called from fschmd_detect
750 to cleanup after an error */ 755 to cleanup after an error */
@@ -760,9 +765,6 @@ static int fschmd_detach_client(struct i2c_client *client)
760 device_remove_file(&client->dev, 765 device_remove_file(&client->dev,
761 &fschmd_fan_attr[i].dev_attr); 766 &fschmd_fan_attr[i].dev_attr);
762 767
763 if ((err = i2c_detach_client(client)))
764 return err;
765
766 kfree(data); 768 kfree(data);
767 return 0; 769 return 0;
768} 770}
diff --git a/drivers/hwmon/fscpos.c b/drivers/hwmon/fscpos.c
index 00f48484e54b..8a7bcf500b4e 100644
--- a/drivers/hwmon/fscpos.c
+++ b/drivers/hwmon/fscpos.c
@@ -87,9 +87,11 @@ static u8 FSCPOS_REG_TEMP_STATE[] = { 0x71, 0x81, 0x91 };
87/* 87/*
88 * Functions declaration 88 * Functions declaration
89 */ 89 */
90static int fscpos_attach_adapter(struct i2c_adapter *adapter); 90static int fscpos_probe(struct i2c_client *client,
91static int fscpos_detect(struct i2c_adapter *adapter, int address, int kind); 91 const struct i2c_device_id *id);
92static int fscpos_detach_client(struct i2c_client *client); 92static int fscpos_detect(struct i2c_client *client, int kind,
93 struct i2c_board_info *info);
94static int fscpos_remove(struct i2c_client *client);
93 95
94static int fscpos_read_value(struct i2c_client *client, u8 reg); 96static int fscpos_read_value(struct i2c_client *client, u8 reg);
95static int fscpos_write_value(struct i2c_client *client, u8 reg, u8 value); 97static int fscpos_write_value(struct i2c_client *client, u8 reg, u8 value);
@@ -101,19 +103,27 @@ static void reset_fan_alarm(struct i2c_client *client, int nr);
101/* 103/*
102 * Driver data (common to all clients) 104 * Driver data (common to all clients)
103 */ 105 */
106static const struct i2c_device_id fscpos_id[] = {
107 { "fscpos", fscpos },
108 { }
109};
110
104static struct i2c_driver fscpos_driver = { 111static struct i2c_driver fscpos_driver = {
112 .class = I2C_CLASS_HWMON,
105 .driver = { 113 .driver = {
106 .name = "fscpos", 114 .name = "fscpos",
107 }, 115 },
108 .attach_adapter = fscpos_attach_adapter, 116 .probe = fscpos_probe,
109 .detach_client = fscpos_detach_client, 117 .remove = fscpos_remove,
118 .id_table = fscpos_id,
119 .detect = fscpos_detect,
120 .address_data = &addr_data,
110}; 121};
111 122
112/* 123/*
113 * Client data (each client gets its own) 124 * Client data (each client gets its own)
114 */ 125 */
115struct fscpos_data { 126struct fscpos_data {
116 struct i2c_client client;
117 struct device *hwmon_dev; 127 struct device *hwmon_dev;
118 struct mutex update_lock; 128 struct mutex update_lock;
119 char valid; /* 0 until following fields are valid */ 129 char valid; /* 0 until following fields are valid */
@@ -470,39 +480,14 @@ static const struct attribute_group fscpos_group = {
470 .attrs = fscpos_attributes, 480 .attrs = fscpos_attributes,
471}; 481};
472 482
473static int fscpos_attach_adapter(struct i2c_adapter *adapter) 483/* Return 0 if detection is successful, -ENODEV otherwise */
474{ 484static int fscpos_detect(struct i2c_client *new_client, int kind,
475 if (!(adapter->class & I2C_CLASS_HWMON)) 485 struct i2c_board_info *info)
476 return 0;
477 return i2c_probe(adapter, &addr_data, fscpos_detect);
478}
479
480static int fscpos_detect(struct i2c_adapter *adapter, int address, int kind)
481{ 486{
482 struct i2c_client *new_client; 487 struct i2c_adapter *adapter = new_client->adapter;
483 struct fscpos_data *data;
484 int err = 0;
485 488
486 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 489 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
487 goto exit; 490 return -ENODEV;
488
489 /*
490 * OK. For now, we presume we have a valid client. We now create the
491 * client structure, even though we cannot fill it completely yet.
492 * But it allows us to access fscpos_{read,write}_value.
493 */
494
495 if (!(data = kzalloc(sizeof(struct fscpos_data), GFP_KERNEL))) {
496 err = -ENOMEM;
497 goto exit;
498 }
499
500 new_client = &data->client;
501 i2c_set_clientdata(new_client, data);
502 new_client->addr = address;
503 new_client->adapter = adapter;
504 new_client->driver = &fscpos_driver;
505 new_client->flags = 0;
506 491
507 /* Do the remaining detection unless force or force_fscpos parameter */ 492 /* Do the remaining detection unless force or force_fscpos parameter */
508 if (kind < 0) { 493 if (kind < 0) {
@@ -512,22 +497,30 @@ static int fscpos_detect(struct i2c_adapter *adapter, int address, int kind)
512 != 0x45) /* 'E' */ 497 != 0x45) /* 'E' */
513 || (fscpos_read_value(new_client, FSCPOS_REG_IDENT_2) 498 || (fscpos_read_value(new_client, FSCPOS_REG_IDENT_2)
514 != 0x47))/* 'G' */ 499 != 0x47))/* 'G' */
515 { 500 return -ENODEV;
516 dev_dbg(&new_client->dev, "fscpos detection failed\n");
517 goto exit_free;
518 }
519 } 501 }
520 502
521 /* Fill in the remaining client fields and put it in the global list */ 503 strlcpy(info->type, "fscpos", I2C_NAME_SIZE);
522 strlcpy(new_client->name, "fscpos", I2C_NAME_SIZE);
523 504
505 return 0;
506}
507
508static int fscpos_probe(struct i2c_client *new_client,
509 const struct i2c_device_id *id)
510{
511 struct fscpos_data *data;
512 int err;
513
514 data = kzalloc(sizeof(struct fscpos_data), GFP_KERNEL);
515 if (!data) {
516 err = -ENOMEM;
517 goto exit;
518 }
519
520 i2c_set_clientdata(new_client, data);
524 data->valid = 0; 521 data->valid = 0;
525 mutex_init(&data->update_lock); 522 mutex_init(&data->update_lock);
526 523
527 /* Tell the I2C layer a new client has arrived */
528 if ((err = i2c_attach_client(new_client)))
529 goto exit_free;
530
531 /* Inizialize the fscpos chip */ 524 /* Inizialize the fscpos chip */
532 fscpos_init_client(new_client); 525 fscpos_init_client(new_client);
533 526
@@ -536,7 +529,7 @@ static int fscpos_detect(struct i2c_adapter *adapter, int address, int kind)
536 529
537 /* Register sysfs hooks */ 530 /* Register sysfs hooks */
538 if ((err = sysfs_create_group(&new_client->dev.kobj, &fscpos_group))) 531 if ((err = sysfs_create_group(&new_client->dev.kobj, &fscpos_group)))
539 goto exit_detach; 532 goto exit_free;
540 533
541 data->hwmon_dev = hwmon_device_register(&new_client->dev); 534 data->hwmon_dev = hwmon_device_register(&new_client->dev);
542 if (IS_ERR(data->hwmon_dev)) { 535 if (IS_ERR(data->hwmon_dev)) {
@@ -548,24 +541,19 @@ static int fscpos_detect(struct i2c_adapter *adapter, int address, int kind)
548 541
549exit_remove_files: 542exit_remove_files:
550 sysfs_remove_group(&new_client->dev.kobj, &fscpos_group); 543 sysfs_remove_group(&new_client->dev.kobj, &fscpos_group);
551exit_detach:
552 i2c_detach_client(new_client);
553exit_free: 544exit_free:
554 kfree(data); 545 kfree(data);
555exit: 546exit:
556 return err; 547 return err;
557} 548}
558 549
559static int fscpos_detach_client(struct i2c_client *client) 550static int fscpos_remove(struct i2c_client *client)
560{ 551{
561 struct fscpos_data *data = i2c_get_clientdata(client); 552 struct fscpos_data *data = i2c_get_clientdata(client);
562 int err;
563 553
564 hwmon_device_unregister(data->hwmon_dev); 554 hwmon_device_unregister(data->hwmon_dev);
565 sysfs_remove_group(&client->dev.kobj, &fscpos_group); 555 sysfs_remove_group(&client->dev.kobj, &fscpos_group);
566 556
567 if ((err = i2c_detach_client(client)))
568 return err;
569 kfree(data); 557 kfree(data);
570 return 0; 558 return 0;
571} 559}
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 33e9e8a8d1ce..7820df45d77a 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -114,7 +114,6 @@ static inline u8 FAN_TO_REG(long rpm, int div)
114 114
115/* Each client has this additional data */ 115/* Each client has this additional data */
116struct gl518_data { 116struct gl518_data {
117 struct i2c_client client;
118 struct device *hwmon_dev; 117 struct device *hwmon_dev;
119 enum chips type; 118 enum chips type;
120 119
@@ -138,21 +137,33 @@ struct gl518_data {
138 u8 beep_enable; /* Boolean */ 137 u8 beep_enable; /* Boolean */
139}; 138};
140 139
141static int gl518_attach_adapter(struct i2c_adapter *adapter); 140static int gl518_probe(struct i2c_client *client,
142static int gl518_detect(struct i2c_adapter *adapter, int address, int kind); 141 const struct i2c_device_id *id);
142static int gl518_detect(struct i2c_client *client, int kind,
143 struct i2c_board_info *info);
143static void gl518_init_client(struct i2c_client *client); 144static void gl518_init_client(struct i2c_client *client);
144static int gl518_detach_client(struct i2c_client *client); 145static int gl518_remove(struct i2c_client *client);
145static int gl518_read_value(struct i2c_client *client, u8 reg); 146static int gl518_read_value(struct i2c_client *client, u8 reg);
146static int gl518_write_value(struct i2c_client *client, u8 reg, u16 value); 147static int gl518_write_value(struct i2c_client *client, u8 reg, u16 value);
147static struct gl518_data *gl518_update_device(struct device *dev); 148static struct gl518_data *gl518_update_device(struct device *dev);
148 149
150static const struct i2c_device_id gl518_id[] = {
151 { "gl518sm", 0 },
152 { }
153};
154MODULE_DEVICE_TABLE(i2c, gl518_id);
155
149/* This is the driver that will be inserted */ 156/* This is the driver that will be inserted */
150static struct i2c_driver gl518_driver = { 157static struct i2c_driver gl518_driver = {
158 .class = I2C_CLASS_HWMON,
151 .driver = { 159 .driver = {
152 .name = "gl518sm", 160 .name = "gl518sm",
153 }, 161 },
154 .attach_adapter = gl518_attach_adapter, 162 .probe = gl518_probe,
155 .detach_client = gl518_detach_client, 163 .remove = gl518_remove,
164 .id_table = gl518_id,
165 .detect = gl518_detect,
166 .address_data = &addr_data,
156}; 167};
157 168
158/* 169/*
@@ -472,46 +483,23 @@ static const struct attribute_group gl518_group_r80 = {
472 * Real code 483 * Real code
473 */ 484 */
474 485
475static int gl518_attach_adapter(struct i2c_adapter *adapter) 486/* Return 0 if detection is successful, -ENODEV otherwise */
476{ 487static int gl518_detect(struct i2c_client *client, int kind,
477 if (!(adapter->class & I2C_CLASS_HWMON)) 488 struct i2c_board_info *info)
478 return 0;
479 return i2c_probe(adapter, &addr_data, gl518_detect);
480}
481
482static int gl518_detect(struct i2c_adapter *adapter, int address, int kind)
483{ 489{
490 struct i2c_adapter *adapter = client->adapter;
484 int i; 491 int i;
485 struct i2c_client *client;
486 struct gl518_data *data;
487 int err = 0;
488 492
489 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | 493 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
490 I2C_FUNC_SMBUS_WORD_DATA)) 494 I2C_FUNC_SMBUS_WORD_DATA))
491 goto exit; 495 return -ENODEV;
492
493 /* OK. For now, we presume we have a valid client. We now create the
494 client structure, even though we cannot fill it completely yet.
495 But it allows us to access gl518_{read,write}_value. */
496
497 if (!(data = kzalloc(sizeof(struct gl518_data), GFP_KERNEL))) {
498 err = -ENOMEM;
499 goto exit;
500 }
501
502 client = &data->client;
503 i2c_set_clientdata(client, data);
504
505 client->addr = address;
506 client->adapter = adapter;
507 client->driver = &gl518_driver;
508 496
509 /* Now, we do the remaining detection. */ 497 /* Now, we do the remaining detection. */
510 498
511 if (kind < 0) { 499 if (kind < 0) {
512 if ((gl518_read_value(client, GL518_REG_CHIP_ID) != 0x80) 500 if ((gl518_read_value(client, GL518_REG_CHIP_ID) != 0x80)
513 || (gl518_read_value(client, GL518_REG_CONF) & 0x80)) 501 || (gl518_read_value(client, GL518_REG_CONF) & 0x80))
514 goto exit_free; 502 return -ENODEV;
515 } 503 }
516 504
517 /* Determine the chip type. */ 505 /* Determine the chip type. */
@@ -526,19 +514,32 @@ static int gl518_detect(struct i2c_adapter *adapter, int address, int kind)
526 dev_info(&adapter->dev, 514 dev_info(&adapter->dev,
527 "Ignoring 'force' parameter for unknown " 515 "Ignoring 'force' parameter for unknown "
528 "chip at adapter %d, address 0x%02x\n", 516 "chip at adapter %d, address 0x%02x\n",
529 i2c_adapter_id(adapter), address); 517 i2c_adapter_id(adapter), client->addr);
530 goto exit_free; 518 return -ENODEV;
531 } 519 }
532 } 520 }
533 521
534 /* Fill in the remaining client fields */ 522 strlcpy(info->type, "gl518sm", I2C_NAME_SIZE);
535 strlcpy(client->name, "gl518sm", I2C_NAME_SIZE);
536 data->type = kind;
537 mutex_init(&data->update_lock);
538 523
539 /* Tell the I2C layer a new client has arrived */ 524 return 0;
540 if ((err = i2c_attach_client(client))) 525}
541 goto exit_free; 526
527static int gl518_probe(struct i2c_client *client,
528 const struct i2c_device_id *id)
529{
530 struct gl518_data *data;
531 int err, revision;
532
533 data = kzalloc(sizeof(struct gl518_data), GFP_KERNEL);
534 if (!data) {
535 err = -ENOMEM;
536 goto exit;
537 }
538
539 i2c_set_clientdata(client, data);
540 revision = gl518_read_value(client, GL518_REG_REVISION);
541 data->type = revision == 0x80 ? gl518sm_r80 : gl518sm_r00;
542 mutex_init(&data->update_lock);
542 543
543 /* Initialize the GL518SM chip */ 544 /* Initialize the GL518SM chip */
544 data->alarm_mask = 0xff; 545 data->alarm_mask = 0xff;
@@ -546,7 +547,7 @@ static int gl518_detect(struct i2c_adapter *adapter, int address, int kind)
546 547
547 /* Register sysfs hooks */ 548 /* Register sysfs hooks */
548 if ((err = sysfs_create_group(&client->dev.kobj, &gl518_group))) 549 if ((err = sysfs_create_group(&client->dev.kobj, &gl518_group)))
549 goto exit_detach; 550 goto exit_free;
550 if (data->type == gl518sm_r80) 551 if (data->type == gl518sm_r80)
551 if ((err = sysfs_create_group(&client->dev.kobj, 552 if ((err = sysfs_create_group(&client->dev.kobj,
552 &gl518_group_r80))) 553 &gl518_group_r80)))
@@ -564,8 +565,6 @@ exit_remove_files:
564 sysfs_remove_group(&client->dev.kobj, &gl518_group); 565 sysfs_remove_group(&client->dev.kobj, &gl518_group);
565 if (data->type == gl518sm_r80) 566 if (data->type == gl518sm_r80)
566 sysfs_remove_group(&client->dev.kobj, &gl518_group_r80); 567 sysfs_remove_group(&client->dev.kobj, &gl518_group_r80);
567exit_detach:
568 i2c_detach_client(client);
569exit_free: 568exit_free:
570 kfree(data); 569 kfree(data);
571exit: 570exit:
@@ -591,19 +590,15 @@ static void gl518_init_client(struct i2c_client *client)
591 gl518_write_value(client, GL518_REG_CONF, 0x40 | regvalue); 590 gl518_write_value(client, GL518_REG_CONF, 0x40 | regvalue);
592} 591}
593 592
594static int gl518_detach_client(struct i2c_client *client) 593static int gl518_remove(struct i2c_client *client)
595{ 594{
596 struct gl518_data *data = i2c_get_clientdata(client); 595 struct gl518_data *data = i2c_get_clientdata(client);
597 int err;
598 596
599 hwmon_device_unregister(data->hwmon_dev); 597 hwmon_device_unregister(data->hwmon_dev);
600 sysfs_remove_group(&client->dev.kobj, &gl518_group); 598 sysfs_remove_group(&client->dev.kobj, &gl518_group);
601 if (data->type == gl518sm_r80) 599 if (data->type == gl518sm_r80)
602 sysfs_remove_group(&client->dev.kobj, &gl518_group_r80); 600 sysfs_remove_group(&client->dev.kobj, &gl518_group_r80);
603 601
604 if ((err = i2c_detach_client(client)))
605 return err;
606
607 kfree(data); 602 kfree(data);
608 return 0; 603 return 0;
609} 604}
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 8984ef141627..19616f2242b0 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -79,26 +79,37 @@ static const u8 GL520_REG_TEMP_MAX_HYST[] = { 0x06, 0x18 };
79 * Function declarations 79 * Function declarations
80 */ 80 */
81 81
82static int gl520_attach_adapter(struct i2c_adapter *adapter); 82static int gl520_probe(struct i2c_client *client,
83static int gl520_detect(struct i2c_adapter *adapter, int address, int kind); 83 const struct i2c_device_id *id);
84static int gl520_detect(struct i2c_client *client, int kind,
85 struct i2c_board_info *info);
84static void gl520_init_client(struct i2c_client *client); 86static void gl520_init_client(struct i2c_client *client);
85static int gl520_detach_client(struct i2c_client *client); 87static int gl520_remove(struct i2c_client *client);
86static int gl520_read_value(struct i2c_client *client, u8 reg); 88static int gl520_read_value(struct i2c_client *client, u8 reg);
87static int gl520_write_value(struct i2c_client *client, u8 reg, u16 value); 89static int gl520_write_value(struct i2c_client *client, u8 reg, u16 value);
88static struct gl520_data *gl520_update_device(struct device *dev); 90static struct gl520_data *gl520_update_device(struct device *dev);
89 91
90/* Driver data */ 92/* Driver data */
93static const struct i2c_device_id gl520_id[] = {
94 { "gl520sm", gl520sm },
95 { }
96};
97MODULE_DEVICE_TABLE(i2c, gl520_id);
98
91static struct i2c_driver gl520_driver = { 99static struct i2c_driver gl520_driver = {
100 .class = I2C_CLASS_HWMON,
92 .driver = { 101 .driver = {
93 .name = "gl520sm", 102 .name = "gl520sm",
94 }, 103 },
95 .attach_adapter = gl520_attach_adapter, 104 .probe = gl520_probe,
96 .detach_client = gl520_detach_client, 105 .remove = gl520_remove,
106 .id_table = gl520_id,
107 .detect = gl520_detect,
108 .address_data = &addr_data,
97}; 109};
98 110
99/* Client data */ 111/* Client data */
100struct gl520_data { 112struct gl520_data {
101 struct i2c_client client;
102 struct device *hwmon_dev; 113 struct device *hwmon_dev;
103 struct mutex update_lock; 114 struct mutex update_lock;
104 char valid; /* zero until the following fields are valid */ 115 char valid; /* zero until the following fields are valid */
@@ -669,37 +680,15 @@ static const struct attribute_group gl520_group_opt = {
669 * Real code 680 * Real code
670 */ 681 */
671 682
672static int gl520_attach_adapter(struct i2c_adapter *adapter) 683/* Return 0 if detection is successful, -ENODEV otherwise */
673{ 684static int gl520_detect(struct i2c_client *client, int kind,
674 if (!(adapter->class & I2C_CLASS_HWMON)) 685 struct i2c_board_info *info)
675 return 0;
676 return i2c_probe(adapter, &addr_data, gl520_detect);
677}
678
679static int gl520_detect(struct i2c_adapter *adapter, int address, int kind)
680{ 686{
681 struct i2c_client *client; 687 struct i2c_adapter *adapter = client->adapter;
682 struct gl520_data *data;
683 int err = 0;
684 688
685 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | 689 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
686 I2C_FUNC_SMBUS_WORD_DATA)) 690 I2C_FUNC_SMBUS_WORD_DATA))
687 goto exit; 691 return -ENODEV;
688
689 /* OK. For now, we presume we have a valid client. We now create the
690 client structure, even though we cannot fill it completely yet.
691 But it allows us to access gl520_{read,write}_value. */
692
693 if (!(data = kzalloc(sizeof(struct gl520_data), GFP_KERNEL))) {
694 err = -ENOMEM;
695 goto exit;
696 }
697
698 client = &data->client;
699 i2c_set_clientdata(client, data);
700 client->addr = address;
701 client->adapter = adapter;
702 client->driver = &gl520_driver;
703 692
704 /* Determine the chip type. */ 693 /* Determine the chip type. */
705 if (kind < 0) { 694 if (kind < 0) {
@@ -707,24 +696,36 @@ static int gl520_detect(struct i2c_adapter *adapter, int address, int kind)
707 ((gl520_read_value(client, GL520_REG_REVISION) & 0x7f) != 0x00) || 696 ((gl520_read_value(client, GL520_REG_REVISION) & 0x7f) != 0x00) ||
708 ((gl520_read_value(client, GL520_REG_CONF) & 0x80) != 0x00)) { 697 ((gl520_read_value(client, GL520_REG_CONF) & 0x80) != 0x00)) {
709 dev_dbg(&client->dev, "Unknown chip type, skipping\n"); 698 dev_dbg(&client->dev, "Unknown chip type, skipping\n");
710 goto exit_free; 699 return -ENODEV;
711 } 700 }
712 } 701 }
713 702
714 /* Fill in the remaining client fields */ 703 strlcpy(info->type, "gl520sm", I2C_NAME_SIZE);
715 strlcpy(client->name, "gl520sm", I2C_NAME_SIZE);
716 mutex_init(&data->update_lock);
717 704
718 /* Tell the I2C layer a new client has arrived */ 705 return 0;
719 if ((err = i2c_attach_client(client))) 706}
720 goto exit_free; 707
708static int gl520_probe(struct i2c_client *client,
709 const struct i2c_device_id *id)
710{
711 struct gl520_data *data;
712 int err;
713
714 data = kzalloc(sizeof(struct gl520_data), GFP_KERNEL);
715 if (!data) {
716 err = -ENOMEM;
717 goto exit;
718 }
719
720 i2c_set_clientdata(client, data);
721 mutex_init(&data->update_lock);
721 722
722 /* Initialize the GL520SM chip */ 723 /* Initialize the GL520SM chip */
723 gl520_init_client(client); 724 gl520_init_client(client);
724 725
725 /* Register sysfs hooks */ 726 /* Register sysfs hooks */
726 if ((err = sysfs_create_group(&client->dev.kobj, &gl520_group))) 727 if ((err = sysfs_create_group(&client->dev.kobj, &gl520_group)))
727 goto exit_detach; 728 goto exit_free;
728 729
729 if (data->two_temps) { 730 if (data->two_temps) {
730 if ((err = device_create_file(&client->dev, 731 if ((err = device_create_file(&client->dev,
@@ -764,8 +765,6 @@ static int gl520_detect(struct i2c_adapter *adapter, int address, int kind)
764exit_remove_files: 765exit_remove_files:
765 sysfs_remove_group(&client->dev.kobj, &gl520_group); 766 sysfs_remove_group(&client->dev.kobj, &gl520_group);
766 sysfs_remove_group(&client->dev.kobj, &gl520_group_opt); 767 sysfs_remove_group(&client->dev.kobj, &gl520_group_opt);
767exit_detach:
768 i2c_detach_client(client);
769exit_free: 768exit_free:
770 kfree(data); 769 kfree(data);
771exit: 770exit:
@@ -811,18 +810,14 @@ static void gl520_init_client(struct i2c_client *client)
811 gl520_write_value(client, GL520_REG_BEEP_MASK, data->beep_mask); 810 gl520_write_value(client, GL520_REG_BEEP_MASK, data->beep_mask);
812} 811}
813 812
814static int gl520_detach_client(struct i2c_client *client) 813static int gl520_remove(struct i2c_client *client)
815{ 814{
816 struct gl520_data *data = i2c_get_clientdata(client); 815 struct gl520_data *data = i2c_get_clientdata(client);
817 int err;
818 816
819 hwmon_device_unregister(data->hwmon_dev); 817 hwmon_device_unregister(data->hwmon_dev);
820 sysfs_remove_group(&client->dev.kobj, &gl520_group); 818 sysfs_remove_group(&client->dev.kobj, &gl520_group);
821 sysfs_remove_group(&client->dev.kobj, &gl520_group_opt); 819 sysfs_remove_group(&client->dev.kobj, &gl520_group_opt);
822 820
823 if ((err = i2c_detach_client(client)))
824 return err;
825
826 kfree(data); 821 kfree(data);
827 return 0; 822 return 0;
828} 823}
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 116287008083..3195a265f0e9 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * lm63.c - driver for the National Semiconductor LM63 temperature sensor 2 * lm63.c - driver for the National Semiconductor LM63 temperature sensor
3 * with integrated fan control 3 * with integrated fan control
4 * Copyright (C) 2004-2006 Jean Delvare <khali@linux-fr.org> 4 * Copyright (C) 2004-2008 Jean Delvare <khali@linux-fr.org>
5 * Based on the lm90 driver. 5 * Based on the lm90 driver.
6 * 6 *
7 * The LM63 is a sensor chip made by National Semiconductor. It measures 7 * The LM63 is a sensor chip made by National Semiconductor. It measures
@@ -128,24 +128,36 @@ I2C_CLIENT_INSMOD_1(lm63);
128 * Functions declaration 128 * Functions declaration
129 */ 129 */
130 130
131static int lm63_attach_adapter(struct i2c_adapter *adapter); 131static int lm63_probe(struct i2c_client *client,
132static int lm63_detach_client(struct i2c_client *client); 132 const struct i2c_device_id *id);
133static int lm63_remove(struct i2c_client *client);
133 134
134static struct lm63_data *lm63_update_device(struct device *dev); 135static struct lm63_data *lm63_update_device(struct device *dev);
135 136
136static int lm63_detect(struct i2c_adapter *adapter, int address, int kind); 137static int lm63_detect(struct i2c_client *client, int kind,
138 struct i2c_board_info *info);
137static void lm63_init_client(struct i2c_client *client); 139static void lm63_init_client(struct i2c_client *client);
138 140
139/* 141/*
140 * Driver data (common to all clients) 142 * Driver data (common to all clients)
141 */ 143 */
142 144
145static const struct i2c_device_id lm63_id[] = {
146 { "lm63", lm63 },
147 { }
148};
149MODULE_DEVICE_TABLE(i2c, lm63_id);
150
143static struct i2c_driver lm63_driver = { 151static struct i2c_driver lm63_driver = {
152 .class = I2C_CLASS_HWMON,
144 .driver = { 153 .driver = {
145 .name = "lm63", 154 .name = "lm63",
146 }, 155 },
147 .attach_adapter = lm63_attach_adapter, 156 .probe = lm63_probe,
148 .detach_client = lm63_detach_client, 157 .remove = lm63_remove,
158 .id_table = lm63_id,
159 .detect = lm63_detect,
160 .address_data = &addr_data,
149}; 161};
150 162
151/* 163/*
@@ -153,7 +165,6 @@ static struct i2c_driver lm63_driver = {
153 */ 165 */
154 166
155struct lm63_data { 167struct lm63_data {
156 struct i2c_client client;
157 struct device *hwmon_dev; 168 struct device *hwmon_dev;
158 struct mutex update_lock; 169 struct mutex update_lock;
159 char valid; /* zero until following fields are valid */ 170 char valid; /* zero until following fields are valid */
@@ -411,43 +422,14 @@ static const struct attribute_group lm63_group_fan1 = {
411 * Real code 422 * Real code
412 */ 423 */
413 424
414static int lm63_attach_adapter(struct i2c_adapter *adapter) 425/* Return 0 if detection is successful, -ENODEV otherwise */
426static int lm63_detect(struct i2c_client *new_client, int kind,
427 struct i2c_board_info *info)
415{ 428{
416 if (!(adapter->class & I2C_CLASS_HWMON)) 429 struct i2c_adapter *adapter = new_client->adapter;
417 return 0;
418 return i2c_probe(adapter, &addr_data, lm63_detect);
419}
420
421/*
422 * The following function does more than just detection. If detection
423 * succeeds, it also registers the new chip.
424 */
425static int lm63_detect(struct i2c_adapter *adapter, int address, int kind)
426{
427 struct i2c_client *new_client;
428 struct lm63_data *data;
429 int err = 0;
430 430
431 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 431 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
432 goto exit; 432 return -ENODEV;
433
434 if (!(data = kzalloc(sizeof(struct lm63_data), GFP_KERNEL))) {
435 err = -ENOMEM;
436 goto exit;
437 }
438
439 /* The common I2C client data is placed right before the
440 LM63-specific data. */
441 new_client = &data->client;
442 i2c_set_clientdata(new_client, data);
443 new_client->addr = address;
444 new_client->adapter = adapter;
445 new_client->driver = &lm63_driver;
446 new_client->flags = 0;
447
448 /* Default to an LM63 if forced */
449 if (kind == 0)
450 kind = lm63;
451 433
452 if (kind < 0) { /* must identify */ 434 if (kind < 0) { /* must identify */
453 u8 man_id, chip_id, reg_config1, reg_config2; 435 u8 man_id, chip_id, reg_config1, reg_config2;
@@ -477,25 +459,38 @@ static int lm63_detect(struct i2c_adapter *adapter, int address, int kind)
477 dev_dbg(&adapter->dev, "Unsupported chip " 459 dev_dbg(&adapter->dev, "Unsupported chip "
478 "(man_id=0x%02X, chip_id=0x%02X).\n", 460 "(man_id=0x%02X, chip_id=0x%02X).\n",
479 man_id, chip_id); 461 man_id, chip_id);
480 goto exit_free; 462 return -ENODEV;
481 } 463 }
482 } 464 }
483 465
484 strlcpy(new_client->name, "lm63", I2C_NAME_SIZE); 466 strlcpy(info->type, "lm63", I2C_NAME_SIZE);
467
468 return 0;
469}
470
471static int lm63_probe(struct i2c_client *new_client,
472 const struct i2c_device_id *id)
473{
474 struct lm63_data *data;
475 int err;
476
477 data = kzalloc(sizeof(struct lm63_data), GFP_KERNEL);
478 if (!data) {
479 err = -ENOMEM;
480 goto exit;
481 }
482
483 i2c_set_clientdata(new_client, data);
485 data->valid = 0; 484 data->valid = 0;
486 mutex_init(&data->update_lock); 485 mutex_init(&data->update_lock);
487 486
488 /* Tell the I2C layer a new client has arrived */
489 if ((err = i2c_attach_client(new_client)))
490 goto exit_free;
491
492 /* Initialize the LM63 chip */ 487 /* Initialize the LM63 chip */
493 lm63_init_client(new_client); 488 lm63_init_client(new_client);
494 489
495 /* Register sysfs hooks */ 490 /* Register sysfs hooks */
496 if ((err = sysfs_create_group(&new_client->dev.kobj, 491 if ((err = sysfs_create_group(&new_client->dev.kobj,
497 &lm63_group))) 492 &lm63_group)))
498 goto exit_detach; 493 goto exit_free;
499 if (data->config & 0x04) { /* tachometer enabled */ 494 if (data->config & 0x04) { /* tachometer enabled */
500 if ((err = sysfs_create_group(&new_client->dev.kobj, 495 if ((err = sysfs_create_group(&new_client->dev.kobj,
501 &lm63_group_fan1))) 496 &lm63_group_fan1)))
@@ -513,8 +508,6 @@ static int lm63_detect(struct i2c_adapter *adapter, int address, int kind)
513exit_remove_files: 508exit_remove_files:
514 sysfs_remove_group(&new_client->dev.kobj, &lm63_group); 509 sysfs_remove_group(&new_client->dev.kobj, &lm63_group);
515 sysfs_remove_group(&new_client->dev.kobj, &lm63_group_fan1); 510 sysfs_remove_group(&new_client->dev.kobj, &lm63_group_fan1);
516exit_detach:
517 i2c_detach_client(new_client);
518exit_free: 511exit_free:
519 kfree(data); 512 kfree(data);
520exit: 513exit:
@@ -556,18 +549,14 @@ static void lm63_init_client(struct i2c_client *client)
556 (data->config_fan & 0x20) ? "manual" : "auto"); 549 (data->config_fan & 0x20) ? "manual" : "auto");
557} 550}
558 551
559static int lm63_detach_client(struct i2c_client *client) 552static int lm63_remove(struct i2c_client *client)
560{ 553{
561 struct lm63_data *data = i2c_get_clientdata(client); 554 struct lm63_data *data = i2c_get_clientdata(client);
562 int err;
563 555
564 hwmon_device_unregister(data->hwmon_dev); 556 hwmon_device_unregister(data->hwmon_dev);
565 sysfs_remove_group(&client->dev.kobj, &lm63_group); 557 sysfs_remove_group(&client->dev.kobj, &lm63_group);
566 sysfs_remove_group(&client->dev.kobj, &lm63_group_fan1); 558 sysfs_remove_group(&client->dev.kobj, &lm63_group_fan1);
567 559
568 if ((err = i2c_detach_client(client)))
569 return err;
570
571 kfree(data); 560 kfree(data);
572 return 0; 561 return 0;
573} 562}
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index 36d5a8c3ad8c..866b401ab6e8 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -52,7 +52,6 @@ I2C_CLIENT_INSMOD_1(lm77);
52 52
53/* Each client has this additional data */ 53/* Each client has this additional data */
54struct lm77_data { 54struct lm77_data {
55 struct i2c_client client;
56 struct device *hwmon_dev; 55 struct device *hwmon_dev;
57 struct mutex update_lock; 56 struct mutex update_lock;
58 char valid; 57 char valid;
@@ -65,23 +64,35 @@ struct lm77_data {
65 u8 alarms; 64 u8 alarms;
66}; 65};
67 66
68static int lm77_attach_adapter(struct i2c_adapter *adapter); 67static int lm77_probe(struct i2c_client *client,
69static int lm77_detect(struct i2c_adapter *adapter, int address, int kind); 68 const struct i2c_device_id *id);
69static int lm77_detect(struct i2c_client *client, int kind,
70 struct i2c_board_info *info);
70static void lm77_init_client(struct i2c_client *client); 71static void lm77_init_client(struct i2c_client *client);
71static int lm77_detach_client(struct i2c_client *client); 72static int lm77_remove(struct i2c_client *client);
72static u16 lm77_read_value(struct i2c_client *client, u8 reg); 73static u16 lm77_read_value(struct i2c_client *client, u8 reg);
73static int lm77_write_value(struct i2c_client *client, u8 reg, u16 value); 74static int lm77_write_value(struct i2c_client *client, u8 reg, u16 value);
74 75
75static struct lm77_data *lm77_update_device(struct device *dev); 76static struct lm77_data *lm77_update_device(struct device *dev);
76 77
77 78
79static const struct i2c_device_id lm77_id[] = {
80 { "lm77", lm77 },
81 { }
82};
83MODULE_DEVICE_TABLE(i2c, lm77_id);
84
78/* This is the driver that will be inserted */ 85/* This is the driver that will be inserted */
79static struct i2c_driver lm77_driver = { 86static struct i2c_driver lm77_driver = {
87 .class = I2C_CLASS_HWMON,
80 .driver = { 88 .driver = {
81 .name = "lm77", 89 .name = "lm77",
82 }, 90 },
83 .attach_adapter = lm77_attach_adapter, 91 .probe = lm77_probe,
84 .detach_client = lm77_detach_client, 92 .remove = lm77_remove,
93 .id_table = lm77_id,
94 .detect = lm77_detect,
95 .address_data = &addr_data,
85}; 96};
86 97
87/* straight from the datasheet */ 98/* straight from the datasheet */
@@ -215,13 +226,6 @@ static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 2);
215static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 0); 226static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
216static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1); 227static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
217 228
218static int lm77_attach_adapter(struct i2c_adapter *adapter)
219{
220 if (!(adapter->class & I2C_CLASS_HWMON))
221 return 0;
222 return i2c_probe(adapter, &addr_data, lm77_detect);
223}
224
225static struct attribute *lm77_attributes[] = { 229static struct attribute *lm77_attributes[] = {
226 &dev_attr_temp1_input.attr, 230 &dev_attr_temp1_input.attr,
227 &dev_attr_temp1_crit.attr, 231 &dev_attr_temp1_crit.attr,
@@ -240,32 +244,15 @@ static const struct attribute_group lm77_group = {
240 .attrs = lm77_attributes, 244 .attrs = lm77_attributes,
241}; 245};
242 246
243/* This function is called by i2c_probe */ 247/* Return 0 if detection is successful, -ENODEV otherwise */
244static int lm77_detect(struct i2c_adapter *adapter, int address, int kind) 248static int lm77_detect(struct i2c_client *new_client, int kind,
249 struct i2c_board_info *info)
245{ 250{
246 struct i2c_client *new_client; 251 struct i2c_adapter *adapter = new_client->adapter;
247 struct lm77_data *data;
248 int err = 0;
249 const char *name = "";
250 252
251 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | 253 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
252 I2C_FUNC_SMBUS_WORD_DATA)) 254 I2C_FUNC_SMBUS_WORD_DATA))
253 goto exit; 255 return -ENODEV;
254
255 /* OK. For now, we presume we have a valid client. We now create the
256 client structure, even though we cannot fill it completely yet.
257 But it allows us to access lm77_{read,write}_value. */
258 if (!(data = kzalloc(sizeof(struct lm77_data), GFP_KERNEL))) {
259 err = -ENOMEM;
260 goto exit;
261 }
262
263 new_client = &data->client;
264 i2c_set_clientdata(new_client, data);
265 new_client->addr = address;
266 new_client->adapter = adapter;
267 new_client->driver = &lm77_driver;
268 new_client->flags = 0;
269 256
270 /* Here comes the remaining detection. Since the LM77 has no 257 /* Here comes the remaining detection. Since the LM77 has no
271 register dedicated to identification, we have to rely on the 258 register dedicated to identification, we have to rely on the
@@ -294,7 +281,7 @@ static int lm77_detect(struct i2c_adapter *adapter, int address, int kind)
294 || i2c_smbus_read_word_data(new_client, i + 3) != crit 281 || i2c_smbus_read_word_data(new_client, i + 3) != crit
295 || i2c_smbus_read_word_data(new_client, i + 4) != min 282 || i2c_smbus_read_word_data(new_client, i + 4) != min
296 || i2c_smbus_read_word_data(new_client, i + 5) != max) 283 || i2c_smbus_read_word_data(new_client, i + 5) != max)
297 goto exit_free; 284 return -ENODEV;
298 285
299 /* sign bits */ 286 /* sign bits */
300 if (((cur & 0x00f0) != 0xf0 && (cur & 0x00f0) != 0x0) 287 if (((cur & 0x00f0) != 0xf0 && (cur & 0x00f0) != 0x0)
@@ -302,51 +289,55 @@ static int lm77_detect(struct i2c_adapter *adapter, int address, int kind)
302 || ((crit & 0x00f0) != 0xf0 && (crit & 0x00f0) != 0x0) 289 || ((crit & 0x00f0) != 0xf0 && (crit & 0x00f0) != 0x0)
303 || ((min & 0x00f0) != 0xf0 && (min & 0x00f0) != 0x0) 290 || ((min & 0x00f0) != 0xf0 && (min & 0x00f0) != 0x0)
304 || ((max & 0x00f0) != 0xf0 && (max & 0x00f0) != 0x0)) 291 || ((max & 0x00f0) != 0xf0 && (max & 0x00f0) != 0x0))
305 goto exit_free; 292 return -ENODEV;
306 293
307 /* unused bits */ 294 /* unused bits */
308 if (conf & 0xe0) 295 if (conf & 0xe0)
309 goto exit_free; 296 return -ENODEV;
310 297
311 /* 0x06 and 0x07 return the last read value */ 298 /* 0x06 and 0x07 return the last read value */
312 cur = i2c_smbus_read_word_data(new_client, 0); 299 cur = i2c_smbus_read_word_data(new_client, 0);
313 if (i2c_smbus_read_word_data(new_client, 6) != cur 300 if (i2c_smbus_read_word_data(new_client, 6) != cur
314 || i2c_smbus_read_word_data(new_client, 7) != cur) 301 || i2c_smbus_read_word_data(new_client, 7) != cur)
315 goto exit_free; 302 return -ENODEV;
316 hyst = i2c_smbus_read_word_data(new_client, 2); 303 hyst = i2c_smbus_read_word_data(new_client, 2);
317 if (i2c_smbus_read_word_data(new_client, 6) != hyst 304 if (i2c_smbus_read_word_data(new_client, 6) != hyst
318 || i2c_smbus_read_word_data(new_client, 7) != hyst) 305 || i2c_smbus_read_word_data(new_client, 7) != hyst)
319 goto exit_free; 306 return -ENODEV;
320 min = i2c_smbus_read_word_data(new_client, 4); 307 min = i2c_smbus_read_word_data(new_client, 4);
321 if (i2c_smbus_read_word_data(new_client, 6) != min 308 if (i2c_smbus_read_word_data(new_client, 6) != min
322 || i2c_smbus_read_word_data(new_client, 7) != min) 309 || i2c_smbus_read_word_data(new_client, 7) != min)
323 goto exit_free; 310 return -ENODEV;
324 311
325 } 312 }
326 313
327 /* Determine the chip type - only one kind supported! */ 314 strlcpy(info->type, "lm77", I2C_NAME_SIZE);
328 if (kind <= 0)
329 kind = lm77;
330 315
331 if (kind == lm77) { 316 return 0;
332 name = "lm77"; 317}
318
319static int lm77_probe(struct i2c_client *new_client,
320 const struct i2c_device_id *id)
321{
322 struct lm77_data *data;
323 int err;
324
325 data = kzalloc(sizeof(struct lm77_data), GFP_KERNEL);
326 if (!data) {
327 err = -ENOMEM;
328 goto exit;
333 } 329 }
334 330
335 /* Fill in the remaining client fields and put it into the global list */ 331 i2c_set_clientdata(new_client, data);
336 strlcpy(new_client->name, name, I2C_NAME_SIZE);
337 data->valid = 0; 332 data->valid = 0;
338 mutex_init(&data->update_lock); 333 mutex_init(&data->update_lock);
339 334
340 /* Tell the I2C layer a new client has arrived */
341 if ((err = i2c_attach_client(new_client)))
342 goto exit_free;
343
344 /* Initialize the LM77 chip */ 335 /* Initialize the LM77 chip */
345 lm77_init_client(new_client); 336 lm77_init_client(new_client);
346 337
347 /* Register sysfs hooks */ 338 /* Register sysfs hooks */
348 if ((err = sysfs_create_group(&new_client->dev.kobj, &lm77_group))) 339 if ((err = sysfs_create_group(&new_client->dev.kobj, &lm77_group)))
349 goto exit_detach; 340 goto exit_free;
350 341
351 data->hwmon_dev = hwmon_device_register(&new_client->dev); 342 data->hwmon_dev = hwmon_device_register(&new_client->dev);
352 if (IS_ERR(data->hwmon_dev)) { 343 if (IS_ERR(data->hwmon_dev)) {
@@ -358,20 +349,17 @@ static int lm77_detect(struct i2c_adapter *adapter, int address, int kind)
358 349
359exit_remove: 350exit_remove:
360 sysfs_remove_group(&new_client->dev.kobj, &lm77_group); 351 sysfs_remove_group(&new_client->dev.kobj, &lm77_group);
361exit_detach:
362 i2c_detach_client(new_client);
363exit_free: 352exit_free:
364 kfree(data); 353 kfree(data);
365exit: 354exit:
366 return err; 355 return err;
367} 356}
368 357
369static int lm77_detach_client(struct i2c_client *client) 358static int lm77_remove(struct i2c_client *client)
370{ 359{
371 struct lm77_data *data = i2c_get_clientdata(client); 360 struct lm77_data *data = i2c_get_clientdata(client);
372 hwmon_device_unregister(data->hwmon_dev); 361 hwmon_device_unregister(data->hwmon_dev);
373 sysfs_remove_group(&client->dev.kobj, &lm77_group); 362 sysfs_remove_group(&client->dev.kobj, &lm77_group);
374 i2c_detach_client(client);
375 kfree(data); 363 kfree(data);
376 return 0; 364 return 0;
377} 365}
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 26c91c9d4769..bcffc1899403 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -108,7 +108,6 @@ static inline long TEMP_FROM_REG(u16 temp)
108 */ 108 */
109 109
110struct lm80_data { 110struct lm80_data {
111 struct i2c_client client;
112 struct device *hwmon_dev; 111 struct device *hwmon_dev;
113 struct mutex update_lock; 112 struct mutex update_lock;
114 char valid; /* !=0 if following fields are valid */ 113 char valid; /* !=0 if following fields are valid */
@@ -132,10 +131,12 @@ struct lm80_data {
132 * Functions declaration 131 * Functions declaration
133 */ 132 */
134 133
135static int lm80_attach_adapter(struct i2c_adapter *adapter); 134static int lm80_probe(struct i2c_client *client,
136static int lm80_detect(struct i2c_adapter *adapter, int address, int kind); 135 const struct i2c_device_id *id);
136static int lm80_detect(struct i2c_client *client, int kind,
137 struct i2c_board_info *info);
137static void lm80_init_client(struct i2c_client *client); 138static void lm80_init_client(struct i2c_client *client);
138static int lm80_detach_client(struct i2c_client *client); 139static int lm80_remove(struct i2c_client *client);
139static struct lm80_data *lm80_update_device(struct device *dev); 140static struct lm80_data *lm80_update_device(struct device *dev);
140static int lm80_read_value(struct i2c_client *client, u8 reg); 141static int lm80_read_value(struct i2c_client *client, u8 reg);
141static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value); 142static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value);
@@ -144,12 +145,22 @@ static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value);
144 * Driver data (common to all clients) 145 * Driver data (common to all clients)
145 */ 146 */
146 147
148static const struct i2c_device_id lm80_id[] = {
149 { "lm80", lm80 },
150 { }
151};
152MODULE_DEVICE_TABLE(i2c, lm80_id);
153
147static struct i2c_driver lm80_driver = { 154static struct i2c_driver lm80_driver = {
155 .class = I2C_CLASS_HWMON,
148 .driver = { 156 .driver = {
149 .name = "lm80", 157 .name = "lm80",
150 }, 158 },
151 .attach_adapter = lm80_attach_adapter, 159 .probe = lm80_probe,
152 .detach_client = lm80_detach_client, 160 .remove = lm80_remove,
161 .id_table = lm80_id,
162 .detect = lm80_detect,
163 .address_data = &addr_data,
153}; 164};
154 165
155/* 166/*
@@ -383,13 +394,6 @@ static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 13);
383 * Real code 394 * Real code
384 */ 395 */
385 396
386static int lm80_attach_adapter(struct i2c_adapter *adapter)
387{
388 if (!(adapter->class & I2C_CLASS_HWMON))
389 return 0;
390 return i2c_probe(adapter, &addr_data, lm80_detect);
391}
392
393static struct attribute *lm80_attributes[] = { 397static struct attribute *lm80_attributes[] = {
394 &sensor_dev_attr_in0_min.dev_attr.attr, 398 &sensor_dev_attr_in0_min.dev_attr.attr,
395 &sensor_dev_attr_in1_min.dev_attr.attr, 399 &sensor_dev_attr_in1_min.dev_attr.attr,
@@ -442,53 +446,46 @@ static const struct attribute_group lm80_group = {
442 .attrs = lm80_attributes, 446 .attrs = lm80_attributes,
443}; 447};
444 448
445static int lm80_detect(struct i2c_adapter *adapter, int address, int kind) 449/* Return 0 if detection is successful, -ENODEV otherwise */
450static int lm80_detect(struct i2c_client *client, int kind,
451 struct i2c_board_info *info)
446{ 452{
453 struct i2c_adapter *adapter = client->adapter;
447 int i, cur; 454 int i, cur;
448 struct i2c_client *client;
449 struct lm80_data *data;
450 int err = 0;
451 const char *name;
452 455
453 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 456 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
454 goto exit; 457 return -ENODEV;
455
456 /* OK. For now, we presume we have a valid client. We now create the
457 client structure, even though we cannot fill it completely yet.
458 But it allows us to access lm80_{read,write}_value. */
459 if (!(data = kzalloc(sizeof(struct lm80_data), GFP_KERNEL))) {
460 err = -ENOMEM;
461 goto exit;
462 }
463
464 client = &data->client;
465 i2c_set_clientdata(client, data);
466 client->addr = address;
467 client->adapter = adapter;
468 client->driver = &lm80_driver;
469 458
470 /* Now, we do the remaining detection. It is lousy. */ 459 /* Now, we do the remaining detection. It is lousy. */
471 if (lm80_read_value(client, LM80_REG_ALARM2) & 0xc0) 460 if (lm80_read_value(client, LM80_REG_ALARM2) & 0xc0)
472 goto error_free; 461 return -ENODEV;
473 for (i = 0x2a; i <= 0x3d; i++) { 462 for (i = 0x2a; i <= 0x3d; i++) {
474 cur = i2c_smbus_read_byte_data(client, i); 463 cur = i2c_smbus_read_byte_data(client, i);
475 if ((i2c_smbus_read_byte_data(client, i + 0x40) != cur) 464 if ((i2c_smbus_read_byte_data(client, i + 0x40) != cur)
476 || (i2c_smbus_read_byte_data(client, i + 0x80) != cur) 465 || (i2c_smbus_read_byte_data(client, i + 0x80) != cur)
477 || (i2c_smbus_read_byte_data(client, i + 0xc0) != cur)) 466 || (i2c_smbus_read_byte_data(client, i + 0xc0) != cur))
478 goto error_free; 467 return -ENODEV;
479 } 468 }
480 469
481 /* Determine the chip type - only one kind supported! */ 470 strlcpy(info->type, "lm80", I2C_NAME_SIZE);
482 kind = lm80;
483 name = "lm80";
484 471
485 /* Fill in the remaining client fields */ 472 return 0;
486 strlcpy(client->name, name, I2C_NAME_SIZE); 473}
487 mutex_init(&data->update_lock);
488 474
489 /* Tell the I2C layer a new client has arrived */ 475static int lm80_probe(struct i2c_client *client,
490 if ((err = i2c_attach_client(client))) 476 const struct i2c_device_id *id)
491 goto error_free; 477{
478 struct lm80_data *data;
479 int err;
480
481 data = kzalloc(sizeof(struct lm80_data), GFP_KERNEL);
482 if (!data) {
483 err = -ENOMEM;
484 goto exit;
485 }
486
487 i2c_set_clientdata(client, data);
488 mutex_init(&data->update_lock);
492 489
493 /* Initialize the LM80 chip */ 490 /* Initialize the LM80 chip */
494 lm80_init_client(client); 491 lm80_init_client(client);
@@ -499,7 +496,7 @@ static int lm80_detect(struct i2c_adapter *adapter, int address, int kind)
499 496
500 /* Register sysfs hooks */ 497 /* Register sysfs hooks */
501 if ((err = sysfs_create_group(&client->dev.kobj, &lm80_group))) 498 if ((err = sysfs_create_group(&client->dev.kobj, &lm80_group)))
502 goto error_detach; 499 goto error_free;
503 500
504 data->hwmon_dev = hwmon_device_register(&client->dev); 501 data->hwmon_dev = hwmon_device_register(&client->dev);
505 if (IS_ERR(data->hwmon_dev)) { 502 if (IS_ERR(data->hwmon_dev)) {
@@ -511,23 +508,18 @@ static int lm80_detect(struct i2c_adapter *adapter, int address, int kind)
511 508
512error_remove: 509error_remove:
513 sysfs_remove_group(&client->dev.kobj, &lm80_group); 510 sysfs_remove_group(&client->dev.kobj, &lm80_group);
514error_detach:
515 i2c_detach_client(client);
516error_free: 511error_free:
517 kfree(data); 512 kfree(data);
518exit: 513exit:
519 return err; 514 return err;
520} 515}
521 516
522static int lm80_detach_client(struct i2c_client *client) 517static int lm80_remove(struct i2c_client *client)
523{ 518{
524 struct lm80_data *data = i2c_get_clientdata(client); 519 struct lm80_data *data = i2c_get_clientdata(client);
525 int err;
526 520
527 hwmon_device_unregister(data->hwmon_dev); 521 hwmon_device_unregister(data->hwmon_dev);
528 sysfs_remove_group(&client->dev.kobj, &lm80_group); 522 sysfs_remove_group(&client->dev.kobj, &lm80_group);
529 if ((err = i2c_detach_client(client)))
530 return err;
531 523
532 kfree(data); 524 kfree(data);
533 return 0; 525 return 0;
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 6a8642fa25fb..e59e2d1f080c 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * lm83.c - Part of lm_sensors, Linux kernel modules for hardware 2 * lm83.c - Part of lm_sensors, Linux kernel modules for hardware
3 * monitoring 3 * monitoring
4 * Copyright (C) 2003-2006 Jean Delvare <khali@linux-fr.org> 4 * Copyright (C) 2003-2008 Jean Delvare <khali@linux-fr.org>
5 * 5 *
6 * Heavily inspired from the lm78, lm75 and adm1021 drivers. The LM83 is 6 * Heavily inspired from the lm78, lm75 and adm1021 drivers. The LM83 is
7 * a sensor chip made by National Semiconductor. It reports up to four 7 * a sensor chip made by National Semiconductor. It reports up to four
@@ -118,21 +118,34 @@ static const u8 LM83_REG_W_HIGH[] = {
118 * Functions declaration 118 * Functions declaration
119 */ 119 */
120 120
121static int lm83_attach_adapter(struct i2c_adapter *adapter); 121static int lm83_detect(struct i2c_client *new_client, int kind,
122static int lm83_detect(struct i2c_adapter *adapter, int address, int kind); 122 struct i2c_board_info *info);
123static int lm83_detach_client(struct i2c_client *client); 123static int lm83_probe(struct i2c_client *client,
124 const struct i2c_device_id *id);
125static int lm83_remove(struct i2c_client *client);
124static struct lm83_data *lm83_update_device(struct device *dev); 126static struct lm83_data *lm83_update_device(struct device *dev);
125 127
126/* 128/*
127 * Driver data (common to all clients) 129 * Driver data (common to all clients)
128 */ 130 */
129 131
132static const struct i2c_device_id lm83_id[] = {
133 { "lm83", lm83 },
134 { "lm82", lm82 },
135 { }
136};
137MODULE_DEVICE_TABLE(i2c, lm83_id);
138
130static struct i2c_driver lm83_driver = { 139static struct i2c_driver lm83_driver = {
140 .class = I2C_CLASS_HWMON,
131 .driver = { 141 .driver = {
132 .name = "lm83", 142 .name = "lm83",
133 }, 143 },
134 .attach_adapter = lm83_attach_adapter, 144 .probe = lm83_probe,
135 .detach_client = lm83_detach_client, 145 .remove = lm83_remove,
146 .id_table = lm83_id,
147 .detect = lm83_detect,
148 .address_data = &addr_data,
136}; 149};
137 150
138/* 151/*
@@ -140,7 +153,6 @@ static struct i2c_driver lm83_driver = {
140 */ 153 */
141 154
142struct lm83_data { 155struct lm83_data {
143 struct i2c_client client;
144 struct device *hwmon_dev; 156 struct device *hwmon_dev;
145 struct mutex update_lock; 157 struct mutex update_lock;
146 char valid; /* zero until following fields are valid */ 158 char valid; /* zero until following fields are valid */
@@ -278,40 +290,15 @@ static const struct attribute_group lm83_group_opt = {
278 * Real code 290 * Real code
279 */ 291 */
280 292
281static int lm83_attach_adapter(struct i2c_adapter *adapter) 293/* Return 0 if detection is successful, -ENODEV otherwise */
294static int lm83_detect(struct i2c_client *new_client, int kind,
295 struct i2c_board_info *info)
282{ 296{
283 if (!(adapter->class & I2C_CLASS_HWMON)) 297 struct i2c_adapter *adapter = new_client->adapter;
284 return 0;
285 return i2c_probe(adapter, &addr_data, lm83_detect);
286}
287
288/*
289 * The following function does more than just detection. If detection
290 * succeeds, it also registers the new chip.
291 */
292static int lm83_detect(struct i2c_adapter *adapter, int address, int kind)
293{
294 struct i2c_client *new_client;
295 struct lm83_data *data;
296 int err = 0;
297 const char *name = ""; 298 const char *name = "";
298 299
299 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 300 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
300 goto exit; 301 return -ENODEV;
301
302 if (!(data = kzalloc(sizeof(struct lm83_data), GFP_KERNEL))) {
303 err = -ENOMEM;
304 goto exit;
305 }
306
307 /* The common I2C client data is placed right after the
308 * LM83-specific data. */
309 new_client = &data->client;
310 i2c_set_clientdata(new_client, data);
311 new_client->addr = address;
312 new_client->adapter = adapter;
313 new_client->driver = &lm83_driver;
314 new_client->flags = 0;
315 302
316 /* Now we do the detection and identification. A negative kind 303 /* Now we do the detection and identification. A negative kind
317 * means that the driver was loaded with no force parameter 304 * means that the driver was loaded with no force parameter
@@ -335,8 +322,9 @@ static int lm83_detect(struct i2c_adapter *adapter, int address, int kind)
335 ((i2c_smbus_read_byte_data(new_client, LM83_REG_R_CONFIG) 322 ((i2c_smbus_read_byte_data(new_client, LM83_REG_R_CONFIG)
336 & 0x41) != 0x00)) { 323 & 0x41) != 0x00)) {
337 dev_dbg(&adapter->dev, 324 dev_dbg(&adapter->dev,
338 "LM83 detection failed at 0x%02x.\n", address); 325 "LM83 detection failed at 0x%02x.\n",
339 goto exit_free; 326 new_client->addr);
327 return -ENODEV;
340 } 328 }
341 } 329 }
342 330
@@ -361,7 +349,7 @@ static int lm83_detect(struct i2c_adapter *adapter, int address, int kind)
361 dev_info(&adapter->dev, 349 dev_info(&adapter->dev,
362 "Unsupported chip (man_id=0x%02X, " 350 "Unsupported chip (man_id=0x%02X, "
363 "chip_id=0x%02X).\n", man_id, chip_id); 351 "chip_id=0x%02X).\n", man_id, chip_id);
364 goto exit_free; 352 return -ENODEV;
365 } 353 }
366 } 354 }
367 355
@@ -372,15 +360,27 @@ static int lm83_detect(struct i2c_adapter *adapter, int address, int kind)
372 name = "lm82"; 360 name = "lm82";
373 } 361 }
374 362
375 /* We can fill in the remaining client fields */ 363 strlcpy(info->type, name, I2C_NAME_SIZE);
376 strlcpy(new_client->name, name, I2C_NAME_SIZE); 364
365 return 0;
366}
367
368static int lm83_probe(struct i2c_client *new_client,
369 const struct i2c_device_id *id)
370{
371 struct lm83_data *data;
372 int err;
373
374 data = kzalloc(sizeof(struct lm83_data), GFP_KERNEL);
375 if (!data) {
376 err = -ENOMEM;
377 goto exit;
378 }
379
380 i2c_set_clientdata(new_client, data);
377 data->valid = 0; 381 data->valid = 0;
378 mutex_init(&data->update_lock); 382 mutex_init(&data->update_lock);
379 383
380 /* Tell the I2C layer a new client has arrived */
381 if ((err = i2c_attach_client(new_client)))
382 goto exit_free;
383
384 /* 384 /*
385 * Register sysfs hooks 385 * Register sysfs hooks
386 * The LM82 can only monitor one external diode which is 386 * The LM82 can only monitor one external diode which is
@@ -389,9 +389,9 @@ static int lm83_detect(struct i2c_adapter *adapter, int address, int kind)
389 */ 389 */
390 390
391 if ((err = sysfs_create_group(&new_client->dev.kobj, &lm83_group))) 391 if ((err = sysfs_create_group(&new_client->dev.kobj, &lm83_group)))
392 goto exit_detach; 392 goto exit_free;
393 393
394 if (kind == lm83) { 394 if (id->driver_data == lm83) {
395 if ((err = sysfs_create_group(&new_client->dev.kobj, 395 if ((err = sysfs_create_group(&new_client->dev.kobj,
396 &lm83_group_opt))) 396 &lm83_group_opt)))
397 goto exit_remove_files; 397 goto exit_remove_files;
@@ -408,26 +408,20 @@ static int lm83_detect(struct i2c_adapter *adapter, int address, int kind)
408exit_remove_files: 408exit_remove_files:
409 sysfs_remove_group(&new_client->dev.kobj, &lm83_group); 409 sysfs_remove_group(&new_client->dev.kobj, &lm83_group);
410 sysfs_remove_group(&new_client->dev.kobj, &lm83_group_opt); 410 sysfs_remove_group(&new_client->dev.kobj, &lm83_group_opt);
411exit_detach:
412 i2c_detach_client(new_client);
413exit_free: 411exit_free:
414 kfree(data); 412 kfree(data);
415exit: 413exit:
416 return err; 414 return err;
417} 415}
418 416
419static int lm83_detach_client(struct i2c_client *client) 417static int lm83_remove(struct i2c_client *client)
420{ 418{
421 struct lm83_data *data = i2c_get_clientdata(client); 419 struct lm83_data *data = i2c_get_clientdata(client);
422 int err;
423 420
424 hwmon_device_unregister(data->hwmon_dev); 421 hwmon_device_unregister(data->hwmon_dev);
425 sysfs_remove_group(&client->dev.kobj, &lm83_group); 422 sysfs_remove_group(&client->dev.kobj, &lm83_group);
426 sysfs_remove_group(&client->dev.kobj, &lm83_group_opt); 423 sysfs_remove_group(&client->dev.kobj, &lm83_group_opt);
427 424
428 if ((err = i2c_detach_client(client)))
429 return err;
430
431 kfree(data); 425 kfree(data);
432 return 0; 426 return 0;
433} 427}
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index e1c183f0aae0..21970f0d53a1 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -5,7 +5,7 @@
5 * Philip Edelbrock <phil@netroedge.com> 5 * Philip Edelbrock <phil@netroedge.com>
6 * Stephen Rousset <stephen.rousset@rocketlogix.com> 6 * Stephen Rousset <stephen.rousset@rocketlogix.com>
7 * Dan Eaton <dan.eaton@rocketlogix.com> 7 * Dan Eaton <dan.eaton@rocketlogix.com>
8 * Copyright (C) 2004,2007 Jean Delvare <khali@linux-fr.org> 8 * Copyright (C) 2004-2008 Jean Delvare <khali@linux-fr.org>
9 * 9 *
10 * Original port to Linux 2.6 by Jeff Oliver. 10 * Original port to Linux 2.6 by Jeff Oliver.
11 * 11 *
@@ -157,22 +157,35 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
157 * Functions declaration 157 * Functions declaration
158 */ 158 */
159 159
160static int lm87_attach_adapter(struct i2c_adapter *adapter); 160static int lm87_probe(struct i2c_client *client,
161static int lm87_detect(struct i2c_adapter *adapter, int address, int kind); 161 const struct i2c_device_id *id);
162static int lm87_detect(struct i2c_client *new_client, int kind,
163 struct i2c_board_info *info);
162static void lm87_init_client(struct i2c_client *client); 164static void lm87_init_client(struct i2c_client *client);
163static int lm87_detach_client(struct i2c_client *client); 165static int lm87_remove(struct i2c_client *client);
164static struct lm87_data *lm87_update_device(struct device *dev); 166static struct lm87_data *lm87_update_device(struct device *dev);
165 167
166/* 168/*
167 * Driver data (common to all clients) 169 * Driver data (common to all clients)
168 */ 170 */
169 171
172static const struct i2c_device_id lm87_id[] = {
173 { "lm87", lm87 },
174 { "adm1024", adm1024 },
175 { }
176};
177MODULE_DEVICE_TABLE(i2c, lm87_id);
178
170static struct i2c_driver lm87_driver = { 179static struct i2c_driver lm87_driver = {
180 .class = I2C_CLASS_HWMON,
171 .driver = { 181 .driver = {
172 .name = "lm87", 182 .name = "lm87",
173 }, 183 },
174 .attach_adapter = lm87_attach_adapter, 184 .probe = lm87_probe,
175 .detach_client = lm87_detach_client, 185 .remove = lm87_remove,
186 .id_table = lm87_id,
187 .detect = lm87_detect,
188 .address_data = &addr_data,
176}; 189};
177 190
178/* 191/*
@@ -180,7 +193,6 @@ static struct i2c_driver lm87_driver = {
180 */ 193 */
181 194
182struct lm87_data { 195struct lm87_data {
183 struct i2c_client client;
184 struct device *hwmon_dev; 196 struct device *hwmon_dev;
185 struct mutex update_lock; 197 struct mutex update_lock;
186 char valid; /* zero until following fields are valid */ 198 char valid; /* zero until following fields are valid */
@@ -562,13 +574,6 @@ static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 15);
562 * Real code 574 * Real code
563 */ 575 */
564 576
565static int lm87_attach_adapter(struct i2c_adapter *adapter)
566{
567 if (!(adapter->class & I2C_CLASS_HWMON))
568 return 0;
569 return i2c_probe(adapter, &addr_data, lm87_detect);
570}
571
572static struct attribute *lm87_attributes[] = { 577static struct attribute *lm87_attributes[] = {
573 &dev_attr_in1_input.attr, 578 &dev_attr_in1_input.attr,
574 &dev_attr_in1_min.attr, 579 &dev_attr_in1_min.attr,
@@ -656,33 +661,15 @@ static const struct attribute_group lm87_group_opt = {
656 .attrs = lm87_attributes_opt, 661 .attrs = lm87_attributes_opt,
657}; 662};
658 663
659/* 664/* Return 0 if detection is successful, -ENODEV otherwise */
660 * The following function does more than just detection. If detection 665static int lm87_detect(struct i2c_client *new_client, int kind,
661 * succeeds, it also registers the new chip. 666 struct i2c_board_info *info)
662 */
663static int lm87_detect(struct i2c_adapter *adapter, int address, int kind)
664{ 667{
665 struct i2c_client *new_client; 668 struct i2c_adapter *adapter = new_client->adapter;
666 struct lm87_data *data;
667 int err = 0;
668 static const char *names[] = { "lm87", "adm1024" }; 669 static const char *names[] = { "lm87", "adm1024" };
669 670
670 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 671 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
671 goto exit; 672 return -ENODEV;
672
673 if (!(data = kzalloc(sizeof(struct lm87_data), GFP_KERNEL))) {
674 err = -ENOMEM;
675 goto exit;
676 }
677
678 /* The common I2C client data is placed right before the
679 LM87-specific data. */
680 new_client = &data->client;
681 i2c_set_clientdata(new_client, data);
682 new_client->addr = address;
683 new_client->adapter = adapter;
684 new_client->driver = &lm87_driver;
685 new_client->flags = 0;
686 673
687 /* Default to an LM87 if forced */ 674 /* Default to an LM87 if forced */
688 if (kind == 0) 675 if (kind == 0)
@@ -704,20 +691,32 @@ static int lm87_detect(struct i2c_adapter *adapter, int address, int kind)
704 || (lm87_read_value(new_client, LM87_REG_CONFIG) & 0x80)) { 691 || (lm87_read_value(new_client, LM87_REG_CONFIG) & 0x80)) {
705 dev_dbg(&adapter->dev, 692 dev_dbg(&adapter->dev,
706 "LM87 detection failed at 0x%02x.\n", 693 "LM87 detection failed at 0x%02x.\n",
707 address); 694 new_client->addr);
708 goto exit_free; 695 return -ENODEV;
709 } 696 }
710 } 697 }
711 698
712 /* We can fill in the remaining client fields */ 699 strlcpy(info->type, names[kind - 1], I2C_NAME_SIZE);
713 strlcpy(new_client->name, names[kind - 1], I2C_NAME_SIZE); 700
701 return 0;
702}
703
704static int lm87_probe(struct i2c_client *new_client,
705 const struct i2c_device_id *id)
706{
707 struct lm87_data *data;
708 int err;
709
710 data = kzalloc(sizeof(struct lm87_data), GFP_KERNEL);
711 if (!data) {
712 err = -ENOMEM;
713 goto exit;
714 }
715
716 i2c_set_clientdata(new_client, data);
714 data->valid = 0; 717 data->valid = 0;
715 mutex_init(&data->update_lock); 718 mutex_init(&data->update_lock);
716 719
717 /* Tell the I2C layer a new client has arrived */
718 if ((err = i2c_attach_client(new_client)))
719 goto exit_free;
720
721 /* Initialize the LM87 chip */ 720 /* Initialize the LM87 chip */
722 lm87_init_client(new_client); 721 lm87_init_client(new_client);
723 722
@@ -732,7 +731,7 @@ static int lm87_detect(struct i2c_adapter *adapter, int address, int kind)
732 731
733 /* Register sysfs hooks */ 732 /* Register sysfs hooks */
734 if ((err = sysfs_create_group(&new_client->dev.kobj, &lm87_group))) 733 if ((err = sysfs_create_group(&new_client->dev.kobj, &lm87_group)))
735 goto exit_detach; 734 goto exit_free;
736 735
737 if (data->channel & CHAN_NO_FAN(0)) { 736 if (data->channel & CHAN_NO_FAN(0)) {
738 if ((err = device_create_file(&new_client->dev, 737 if ((err = device_create_file(&new_client->dev,
@@ -832,8 +831,6 @@ static int lm87_detect(struct i2c_adapter *adapter, int address, int kind)
832exit_remove: 831exit_remove:
833 sysfs_remove_group(&new_client->dev.kobj, &lm87_group); 832 sysfs_remove_group(&new_client->dev.kobj, &lm87_group);
834 sysfs_remove_group(&new_client->dev.kobj, &lm87_group_opt); 833 sysfs_remove_group(&new_client->dev.kobj, &lm87_group_opt);
835exit_detach:
836 i2c_detach_client(new_client);
837exit_free: 834exit_free:
838 kfree(data); 835 kfree(data);
839exit: 836exit:
@@ -877,18 +874,14 @@ static void lm87_init_client(struct i2c_client *client)
877 } 874 }
878} 875}
879 876
880static int lm87_detach_client(struct i2c_client *client) 877static int lm87_remove(struct i2c_client *client)
881{ 878{
882 struct lm87_data *data = i2c_get_clientdata(client); 879 struct lm87_data *data = i2c_get_clientdata(client);
883 int err;
884 880
885 hwmon_device_unregister(data->hwmon_dev); 881 hwmon_device_unregister(data->hwmon_dev);
886 sysfs_remove_group(&client->dev.kobj, &lm87_group); 882 sysfs_remove_group(&client->dev.kobj, &lm87_group);
887 sysfs_remove_group(&client->dev.kobj, &lm87_group_opt); 883 sysfs_remove_group(&client->dev.kobj, &lm87_group_opt);
888 884
889 if ((err = i2c_detach_client(client)))
890 return err;
891
892 kfree(data); 885 kfree(data);
893 return 0; 886 return 0;
894} 887}
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index d1a3da3dd8e0..c24fe36ac787 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -187,23 +187,44 @@ I2C_CLIENT_INSMOD_7(lm90, adm1032, lm99, lm86, max6657, adt7461, max6680);
187 * Functions declaration 187 * Functions declaration
188 */ 188 */
189 189
190static int lm90_attach_adapter(struct i2c_adapter *adapter); 190static int lm90_detect(struct i2c_client *client, int kind,
191static int lm90_detect(struct i2c_adapter *adapter, int address, 191 struct i2c_board_info *info);
192 int kind); 192static int lm90_probe(struct i2c_client *client,
193 const struct i2c_device_id *id);
193static void lm90_init_client(struct i2c_client *client); 194static void lm90_init_client(struct i2c_client *client);
194static int lm90_detach_client(struct i2c_client *client); 195static int lm90_remove(struct i2c_client *client);
195static struct lm90_data *lm90_update_device(struct device *dev); 196static struct lm90_data *lm90_update_device(struct device *dev);
196 197
197/* 198/*
198 * Driver data (common to all clients) 199 * Driver data (common to all clients)
199 */ 200 */
200 201
202static const struct i2c_device_id lm90_id[] = {
203 { "adm1032", adm1032 },
204 { "adt7461", adt7461 },
205 { "lm90", lm90 },
206 { "lm86", lm86 },
207 { "lm89", lm99 },
208 { "lm99", lm99 }, /* Missing temperature offset */
209 { "max6657", max6657 },
210 { "max6658", max6657 },
211 { "max6659", max6657 },
212 { "max6680", max6680 },
213 { "max6681", max6680 },
214 { }
215};
216MODULE_DEVICE_TABLE(i2c, lm90_id);
217
201static struct i2c_driver lm90_driver = { 218static struct i2c_driver lm90_driver = {
219 .class = I2C_CLASS_HWMON,
202 .driver = { 220 .driver = {
203 .name = "lm90", 221 .name = "lm90",
204 }, 222 },
205 .attach_adapter = lm90_attach_adapter, 223 .probe = lm90_probe,
206 .detach_client = lm90_detach_client, 224 .remove = lm90_remove,
225 .id_table = lm90_id,
226 .detect = lm90_detect,
227 .address_data = &addr_data,
207}; 228};
208 229
209/* 230/*
@@ -211,7 +232,6 @@ static struct i2c_driver lm90_driver = {
211 */ 232 */
212 233
213struct lm90_data { 234struct lm90_data {
214 struct i2c_client client;
215 struct device *hwmon_dev; 235 struct device *hwmon_dev;
216 struct mutex update_lock; 236 struct mutex update_lock;
217 char valid; /* zero until following fields are valid */ 237 char valid; /* zero until following fields are valid */
@@ -477,40 +497,16 @@ static int lm90_read_reg(struct i2c_client* client, u8 reg, u8 *value)
477 return 0; 497 return 0;
478} 498}
479 499
480static int lm90_attach_adapter(struct i2c_adapter *adapter) 500/* Return 0 if detection is successful, -ENODEV otherwise */
481{ 501static int lm90_detect(struct i2c_client *new_client, int kind,
482 if (!(adapter->class & I2C_CLASS_HWMON)) 502 struct i2c_board_info *info)
483 return 0;
484 return i2c_probe(adapter, &addr_data, lm90_detect);
485}
486
487/*
488 * The following function does more than just detection. If detection
489 * succeeds, it also registers the new chip.
490 */
491static int lm90_detect(struct i2c_adapter *adapter, int address, int kind)
492{ 503{
493 struct i2c_client *new_client; 504 struct i2c_adapter *adapter = new_client->adapter;
494 struct lm90_data *data; 505 int address = new_client->addr;
495 int err = 0;
496 const char *name = ""; 506 const char *name = "";
497 507
498 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 508 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
499 goto exit; 509 return -ENODEV;
500
501 if (!(data = kzalloc(sizeof(struct lm90_data), GFP_KERNEL))) {
502 err = -ENOMEM;
503 goto exit;
504 }
505
506 /* The common I2C client data is placed right before the
507 LM90-specific data. */
508 new_client = &data->client;
509 i2c_set_clientdata(new_client, data);
510 new_client->addr = address;
511 new_client->adapter = adapter;
512 new_client->driver = &lm90_driver;
513 new_client->flags = 0;
514 510
515 /* 511 /*
516 * Now we do the remaining detection. A negative kind means that 512 * Now we do the remaining detection. A negative kind means that
@@ -538,7 +534,7 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind)
538 LM90_REG_R_CONFIG1)) < 0 534 LM90_REG_R_CONFIG1)) < 0
539 || (reg_convrate = i2c_smbus_read_byte_data(new_client, 535 || (reg_convrate = i2c_smbus_read_byte_data(new_client,
540 LM90_REG_R_CONVRATE)) < 0) 536 LM90_REG_R_CONVRATE)) < 0)
541 goto exit_free; 537 return -ENODEV;
542 538
543 if ((address == 0x4C || address == 0x4D) 539 if ((address == 0x4C || address == 0x4D)
544 && man_id == 0x01) { /* National Semiconductor */ 540 && man_id == 0x01) { /* National Semiconductor */
@@ -546,7 +542,7 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind)
546 542
547 if ((reg_config2 = i2c_smbus_read_byte_data(new_client, 543 if ((reg_config2 = i2c_smbus_read_byte_data(new_client,
548 LM90_REG_R_CONFIG2)) < 0) 544 LM90_REG_R_CONFIG2)) < 0)
549 goto exit_free; 545 return -ENODEV;
550 546
551 if ((reg_config1 & 0x2A) == 0x00 547 if ((reg_config1 & 0x2A) == 0x00
552 && (reg_config2 & 0xF8) == 0x00 548 && (reg_config2 & 0xF8) == 0x00
@@ -610,10 +606,11 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind)
610 dev_info(&adapter->dev, 606 dev_info(&adapter->dev,
611 "Unsupported chip (man_id=0x%02X, " 607 "Unsupported chip (man_id=0x%02X, "
612 "chip_id=0x%02X).\n", man_id, chip_id); 608 "chip_id=0x%02X).\n", man_id, chip_id);
613 goto exit_free; 609 return -ENODEV;
614 } 610 }
615 } 611 }
616 612
613 /* Fill the i2c board info */
617 if (kind == lm90) { 614 if (kind == lm90) {
618 name = "lm90"; 615 name = "lm90";
619 } else if (kind == adm1032) { 616 } else if (kind == adm1032) {
@@ -621,7 +618,7 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind)
621 /* The ADM1032 supports PEC, but only if combined 618 /* The ADM1032 supports PEC, but only if combined
622 transactions are not used. */ 619 transactions are not used. */
623 if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) 620 if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
624 new_client->flags |= I2C_CLIENT_PEC; 621 info->flags |= I2C_CLIENT_PEC;
625 } else if (kind == lm99) { 622 } else if (kind == lm99) {
626 name = "lm99"; 623 name = "lm99";
627 } else if (kind == lm86) { 624 } else if (kind == lm86) {
@@ -633,23 +630,39 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind)
633 } else if (kind == adt7461) { 630 } else if (kind == adt7461) {
634 name = "adt7461"; 631 name = "adt7461";
635 } 632 }
633 strlcpy(info->type, name, I2C_NAME_SIZE);
634
635 return 0;
636}
637
638static int lm90_probe(struct i2c_client *new_client,
639 const struct i2c_device_id *id)
640{
641 struct i2c_adapter *adapter = to_i2c_adapter(new_client->dev.parent);
642 struct lm90_data *data;
643 int err;
636 644
637 /* We can fill in the remaining client fields */ 645 data = kzalloc(sizeof(struct lm90_data), GFP_KERNEL);
638 strlcpy(new_client->name, name, I2C_NAME_SIZE); 646 if (!data) {
639 data->valid = 0; 647 err = -ENOMEM;
640 data->kind = kind; 648 goto exit;
649 }
650 i2c_set_clientdata(new_client, data);
641 mutex_init(&data->update_lock); 651 mutex_init(&data->update_lock);
642 652
643 /* Tell the I2C layer a new client has arrived */ 653 /* Set the device type */
644 if ((err = i2c_attach_client(new_client))) 654 data->kind = id->driver_data;
645 goto exit_free; 655 if (data->kind == adm1032) {
656 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
657 new_client->flags &= ~I2C_CLIENT_PEC;
658 }
646 659
647 /* Initialize the LM90 chip */ 660 /* Initialize the LM90 chip */
648 lm90_init_client(new_client); 661 lm90_init_client(new_client);
649 662
650 /* Register sysfs hooks */ 663 /* Register sysfs hooks */
651 if ((err = sysfs_create_group(&new_client->dev.kobj, &lm90_group))) 664 if ((err = sysfs_create_group(&new_client->dev.kobj, &lm90_group)))
652 goto exit_detach; 665 goto exit_free;
653 if (new_client->flags & I2C_CLIENT_PEC) { 666 if (new_client->flags & I2C_CLIENT_PEC) {
654 if ((err = device_create_file(&new_client->dev, 667 if ((err = device_create_file(&new_client->dev,
655 &dev_attr_pec))) 668 &dev_attr_pec)))
@@ -672,8 +685,6 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind)
672exit_remove_files: 685exit_remove_files:
673 sysfs_remove_group(&new_client->dev.kobj, &lm90_group); 686 sysfs_remove_group(&new_client->dev.kobj, &lm90_group);
674 device_remove_file(&new_client->dev, &dev_attr_pec); 687 device_remove_file(&new_client->dev, &dev_attr_pec);
675exit_detach:
676 i2c_detach_client(new_client);
677exit_free: 688exit_free:
678 kfree(data); 689 kfree(data);
679exit: 690exit:
@@ -710,10 +721,9 @@ static void lm90_init_client(struct i2c_client *client)
710 i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config); 721 i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
711} 722}
712 723
713static int lm90_detach_client(struct i2c_client *client) 724static int lm90_remove(struct i2c_client *client)
714{ 725{
715 struct lm90_data *data = i2c_get_clientdata(client); 726 struct lm90_data *data = i2c_get_clientdata(client);
716 int err;
717 727
718 hwmon_device_unregister(data->hwmon_dev); 728 hwmon_device_unregister(data->hwmon_dev);
719 sysfs_remove_group(&client->dev.kobj, &lm90_group); 729 sysfs_remove_group(&client->dev.kobj, &lm90_group);
@@ -722,9 +732,6 @@ static int lm90_detach_client(struct i2c_client *client)
722 device_remove_file(&client->dev, 732 device_remove_file(&client->dev,
723 &sensor_dev_attr_temp2_offset.dev_attr); 733 &sensor_dev_attr_temp2_offset.dev_attr);
724 734
725 if ((err = i2c_detach_client(client)))
726 return err;
727
728 kfree(data); 735 kfree(data);
729 return 0; 736 return 0;
730} 737}
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index c31942e08246..b2e00c5a7eec 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * lm92 - Hardware monitoring driver 2 * lm92 - Hardware monitoring driver
3 * Copyright (C) 2005 Jean Delvare <khali@linux-fr.org> 3 * Copyright (C) 2005-2008 Jean Delvare <khali@linux-fr.org>
4 * 4 *
5 * Based on the lm90 driver, with some ideas taken from the lm_sensors 5 * Based on the lm90 driver, with some ideas taken from the lm_sensors
6 * lm92 driver as well. 6 * lm92 driver as well.
@@ -96,7 +96,6 @@ static struct i2c_driver lm92_driver;
96 96
97/* Client data (each client gets its own) */ 97/* Client data (each client gets its own) */
98struct lm92_data { 98struct lm92_data {
99 struct i2c_client client;
100 struct device *hwmon_dev; 99 struct device *hwmon_dev;
101 struct mutex update_lock; 100 struct mutex update_lock;
102 char valid; /* zero until following fields are valid */ 101 char valid; /* zero until following fields are valid */
@@ -319,32 +318,15 @@ static const struct attribute_group lm92_group = {
319 .attrs = lm92_attributes, 318 .attrs = lm92_attributes,
320}; 319};
321 320
322/* The following function does more than just detection. If detection 321/* Return 0 if detection is successful, -ENODEV otherwise */
323 succeeds, it also registers the new chip. */ 322static int lm92_detect(struct i2c_client *new_client, int kind,
324static int lm92_detect(struct i2c_adapter *adapter, int address, int kind) 323 struct i2c_board_info *info)
325{ 324{
326 struct i2c_client *new_client; 325 struct i2c_adapter *adapter = new_client->adapter;
327 struct lm92_data *data;
328 int err = 0;
329 char *name;
330 326
331 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA 327 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
332 | I2C_FUNC_SMBUS_WORD_DATA)) 328 | I2C_FUNC_SMBUS_WORD_DATA))
333 goto exit; 329 return -ENODEV;
334
335 if (!(data = kzalloc(sizeof(struct lm92_data), GFP_KERNEL))) {
336 err = -ENOMEM;
337 goto exit;
338 }
339
340 /* Fill in enough client fields so that we can read from the chip,
341 which is required for identication */
342 new_client = &data->client;
343 i2c_set_clientdata(new_client, data);
344 new_client->addr = address;
345 new_client->adapter = adapter;
346 new_client->driver = &lm92_driver;
347 new_client->flags = 0;
348 330
349 /* A negative kind means that the driver was loaded with no force 331 /* A negative kind means that the driver was loaded with no force
350 parameter (default), so we must identify the chip. */ 332 parameter (default), so we must identify the chip. */
@@ -364,34 +346,36 @@ static int lm92_detect(struct i2c_adapter *adapter, int address, int kind)
364 kind = lm92; /* No separate prefix */ 346 kind = lm92; /* No separate prefix */
365 } 347 }
366 else 348 else
367 goto exit_free; 349 return -ENODEV;
368 } else
369 if (kind == 0) /* Default to an LM92 if forced */
370 kind = lm92;
371
372 /* Give it the proper name */
373 if (kind == lm92) {
374 name = "lm92";
375 } else { /* Supposedly cannot happen */
376 dev_dbg(&new_client->dev, "Kind out of range?\n");
377 goto exit_free;
378 } 350 }
379 351
380 /* Fill in the remaining client fields */ 352 strlcpy(info->type, "lm92", I2C_NAME_SIZE);
381 strlcpy(new_client->name, name, I2C_NAME_SIZE); 353
354 return 0;
355}
356
357static int lm92_probe(struct i2c_client *new_client,
358 const struct i2c_device_id *id)
359{
360 struct lm92_data *data;
361 int err;
362
363 data = kzalloc(sizeof(struct lm92_data), GFP_KERNEL);
364 if (!data) {
365 err = -ENOMEM;
366 goto exit;
367 }
368
369 i2c_set_clientdata(new_client, data);
382 data->valid = 0; 370 data->valid = 0;
383 mutex_init(&data->update_lock); 371 mutex_init(&data->update_lock);
384 372
385 /* Tell the i2c subsystem a new client has arrived */
386 if ((err = i2c_attach_client(new_client)))
387 goto exit_free;
388
389 /* Initialize the chipset */ 373 /* Initialize the chipset */
390 lm92_init_client(new_client); 374 lm92_init_client(new_client);
391 375
392 /* Register sysfs hooks */ 376 /* Register sysfs hooks */
393 if ((err = sysfs_create_group(&new_client->dev.kobj, &lm92_group))) 377 if ((err = sysfs_create_group(&new_client->dev.kobj, &lm92_group)))
394 goto exit_detach; 378 goto exit_free;
395 379
396 data->hwmon_dev = hwmon_device_register(&new_client->dev); 380 data->hwmon_dev = hwmon_device_register(&new_client->dev);
397 if (IS_ERR(data->hwmon_dev)) { 381 if (IS_ERR(data->hwmon_dev)) {
@@ -403,32 +387,19 @@ static int lm92_detect(struct i2c_adapter *adapter, int address, int kind)
403 387
404exit_remove: 388exit_remove:
405 sysfs_remove_group(&new_client->dev.kobj, &lm92_group); 389 sysfs_remove_group(&new_client->dev.kobj, &lm92_group);
406exit_detach:
407 i2c_detach_client(new_client);
408exit_free: 390exit_free:
409 kfree(data); 391 kfree(data);
410exit: 392exit:
411 return err; 393 return err;
412} 394}
413 395
414static int lm92_attach_adapter(struct i2c_adapter *adapter) 396static int lm92_remove(struct i2c_client *client)
415{
416 if (!(adapter->class & I2C_CLASS_HWMON))
417 return 0;
418 return i2c_probe(adapter, &addr_data, lm92_detect);
419}
420
421static int lm92_detach_client(struct i2c_client *client)
422{ 397{
423 struct lm92_data *data = i2c_get_clientdata(client); 398 struct lm92_data *data = i2c_get_clientdata(client);
424 int err;
425 399
426 hwmon_device_unregister(data->hwmon_dev); 400 hwmon_device_unregister(data->hwmon_dev);
427 sysfs_remove_group(&client->dev.kobj, &lm92_group); 401 sysfs_remove_group(&client->dev.kobj, &lm92_group);
428 402
429 if ((err = i2c_detach_client(client)))
430 return err;
431
432 kfree(data); 403 kfree(data);
433 return 0; 404 return 0;
434} 405}
@@ -438,12 +409,23 @@ static int lm92_detach_client(struct i2c_client *client)
438 * Module and driver stuff 409 * Module and driver stuff
439 */ 410 */
440 411
412static const struct i2c_device_id lm92_id[] = {
413 { "lm92", lm92 },
414 /* max6635 could be added here */
415 { }
416};
417MODULE_DEVICE_TABLE(i2c, lm92_id);
418
441static struct i2c_driver lm92_driver = { 419static struct i2c_driver lm92_driver = {
420 .class = I2C_CLASS_HWMON,
442 .driver = { 421 .driver = {
443 .name = "lm92", 422 .name = "lm92",
444 }, 423 },
445 .attach_adapter = lm92_attach_adapter, 424 .probe = lm92_probe,
446 .detach_client = lm92_detach_client, 425 .remove = lm92_remove,
426 .id_table = lm92_id,
427 .detect = lm92_detect,
428 .address_data = &addr_data,
447}; 429};
448 430
449static int __init sensors_lm92_init(void) 431static int __init sensors_lm92_init(void)
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 5e678f5c883d..fc36cadf36fb 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -200,7 +200,6 @@ struct block1_t {
200 * Client-specific data 200 * Client-specific data
201 */ 201 */
202struct lm93_data { 202struct lm93_data {
203 struct i2c_client client;
204 struct device *hwmon_dev; 203 struct device *hwmon_dev;
205 204
206 struct mutex update_lock; 205 struct mutex update_lock;
@@ -2501,45 +2500,14 @@ static void lm93_init_client(struct i2c_client *client)
2501 "chip to signal ready!\n"); 2500 "chip to signal ready!\n");
2502} 2501}
2503 2502
2504static int lm93_detect(struct i2c_adapter *adapter, int address, int kind) 2503/* Return 0 if detection is successful, -ENODEV otherwise */
2504static int lm93_detect(struct i2c_client *client, int kind,
2505 struct i2c_board_info *info)
2505{ 2506{
2506 struct lm93_data *data; 2507 struct i2c_adapter *adapter = client->adapter;
2507 struct i2c_client *client;
2508
2509 int err = -ENODEV, func;
2510 void (*update)(struct lm93_data *, struct i2c_client *);
2511
2512 /* choose update routine based on bus capabilities */
2513 func = i2c_get_functionality(adapter);
2514 if ( ((LM93_SMBUS_FUNC_FULL & func) == LM93_SMBUS_FUNC_FULL) &&
2515 (!disable_block) ) {
2516 dev_dbg(&adapter->dev,"using SMBus block data transactions\n");
2517 update = lm93_update_client_full;
2518 } else if ((LM93_SMBUS_FUNC_MIN & func) == LM93_SMBUS_FUNC_MIN) {
2519 dev_dbg(&adapter->dev,"disabled SMBus block data "
2520 "transactions\n");
2521 update = lm93_update_client_min;
2522 } else {
2523 dev_dbg(&adapter->dev,"detect failed, "
2524 "smbus byte and/or word data not supported!\n");
2525 goto err_out;
2526 }
2527 2508
2528 /* OK. For now, we presume we have a valid client. We now create the 2509 if (!i2c_check_functionality(adapter, LM93_SMBUS_FUNC_MIN))
2529 client structure, even though we cannot fill it completely yet. 2510 return -ENODEV;
2530 But it allows us to access lm78_{read,write}_value. */
2531
2532 if ( !(data = kzalloc(sizeof(struct lm93_data), GFP_KERNEL))) {
2533 dev_dbg(&adapter->dev,"out of memory!\n");
2534 err = -ENOMEM;
2535 goto err_out;
2536 }
2537
2538 client = &data->client;
2539 i2c_set_clientdata(client, data);
2540 client->addr = address;
2541 client->adapter = adapter;
2542 client->driver = &lm93_driver;
2543 2511
2544 /* detection */ 2512 /* detection */
2545 if (kind < 0) { 2513 if (kind < 0) {
@@ -2548,7 +2516,7 @@ static int lm93_detect(struct i2c_adapter *adapter, int address, int kind)
2548 if (mfr != 0x01) { 2516 if (mfr != 0x01) {
2549 dev_dbg(&adapter->dev,"detect failed, " 2517 dev_dbg(&adapter->dev,"detect failed, "
2550 "bad manufacturer id 0x%02x!\n", mfr); 2518 "bad manufacturer id 0x%02x!\n", mfr);
2551 goto err_free; 2519 return -ENODEV;
2552 } 2520 }
2553 } 2521 }
2554 2522
@@ -2563,31 +2531,61 @@ static int lm93_detect(struct i2c_adapter *adapter, int address, int kind)
2563 if (kind == 0) 2531 if (kind == 0)
2564 dev_dbg(&adapter->dev, 2532 dev_dbg(&adapter->dev,
2565 "(ignored 'force' parameter)\n"); 2533 "(ignored 'force' parameter)\n");
2566 goto err_free; 2534 return -ENODEV;
2567 } 2535 }
2568 } 2536 }
2569 2537
2570 /* fill in remaining client fields */ 2538 strlcpy(info->type, "lm93", I2C_NAME_SIZE);
2571 strlcpy(client->name, "lm93", I2C_NAME_SIZE);
2572 dev_dbg(&adapter->dev,"loading %s at %d,0x%02x\n", 2539 dev_dbg(&adapter->dev,"loading %s at %d,0x%02x\n",
2573 client->name, i2c_adapter_id(client->adapter), 2540 client->name, i2c_adapter_id(client->adapter),
2574 client->addr); 2541 client->addr);
2575 2542
2543 return 0;
2544}
2545
2546static int lm93_probe(struct i2c_client *client,
2547 const struct i2c_device_id *id)
2548{
2549 struct lm93_data *data;
2550 int err, func;
2551 void (*update)(struct lm93_data *, struct i2c_client *);
2552
2553 /* choose update routine based on bus capabilities */
2554 func = i2c_get_functionality(client->adapter);
2555 if (((LM93_SMBUS_FUNC_FULL & func) == LM93_SMBUS_FUNC_FULL) &&
2556 (!disable_block)) {
2557 dev_dbg(&client->dev, "using SMBus block data transactions\n");
2558 update = lm93_update_client_full;
2559 } else if ((LM93_SMBUS_FUNC_MIN & func) == LM93_SMBUS_FUNC_MIN) {
2560 dev_dbg(&client->dev, "disabled SMBus block data "
2561 "transactions\n");
2562 update = lm93_update_client_min;
2563 } else {
2564 dev_dbg(&client->dev, "detect failed, "
2565 "smbus byte and/or word data not supported!\n");
2566 err = -ENODEV;
2567 goto err_out;
2568 }
2569
2570 data = kzalloc(sizeof(struct lm93_data), GFP_KERNEL);
2571 if (!data) {
2572 dev_dbg(&client->dev, "out of memory!\n");
2573 err = -ENOMEM;
2574 goto err_out;
2575 }
2576 i2c_set_clientdata(client, data);
2577
2576 /* housekeeping */ 2578 /* housekeeping */
2577 data->valid = 0; 2579 data->valid = 0;
2578 data->update = update; 2580 data->update = update;
2579 mutex_init(&data->update_lock); 2581 mutex_init(&data->update_lock);
2580 2582
2581 /* tell the I2C layer a new client has arrived */
2582 if ((err = i2c_attach_client(client)))
2583 goto err_free;
2584
2585 /* initialize the chip */ 2583 /* initialize the chip */
2586 lm93_init_client(client); 2584 lm93_init_client(client);
2587 2585
2588 err = sysfs_create_group(&client->dev.kobj, &lm93_attr_grp); 2586 err = sysfs_create_group(&client->dev.kobj, &lm93_attr_grp);
2589 if (err) 2587 if (err)
2590 goto err_detach; 2588 goto err_free;
2591 2589
2592 /* Register hwmon driver class */ 2590 /* Register hwmon driver class */
2593 data->hwmon_dev = hwmon_device_register(&client->dev); 2591 data->hwmon_dev = hwmon_device_register(&client->dev);
@@ -2597,43 +2595,39 @@ static int lm93_detect(struct i2c_adapter *adapter, int address, int kind)
2597 err = PTR_ERR(data->hwmon_dev); 2595 err = PTR_ERR(data->hwmon_dev);
2598 dev_err(&client->dev, "error registering hwmon device.\n"); 2596 dev_err(&client->dev, "error registering hwmon device.\n");
2599 sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp); 2597 sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp);
2600err_detach:
2601 i2c_detach_client(client);
2602err_free: 2598err_free:
2603 kfree(data); 2599 kfree(data);
2604err_out: 2600err_out:
2605 return err; 2601 return err;
2606} 2602}
2607 2603
2608/* This function is called when: 2604static int lm93_remove(struct i2c_client *client)
2609 * lm93_driver is inserted (when this module is loaded), for each
2610 available adapter
2611 * when a new adapter is inserted (and lm93_driver is still present) */
2612static int lm93_attach_adapter(struct i2c_adapter *adapter)
2613{
2614 return i2c_probe(adapter, &addr_data, lm93_detect);
2615}
2616
2617static int lm93_detach_client(struct i2c_client *client)
2618{ 2605{
2619 struct lm93_data *data = i2c_get_clientdata(client); 2606 struct lm93_data *data = i2c_get_clientdata(client);
2620 int err = 0;
2621 2607
2622 hwmon_device_unregister(data->hwmon_dev); 2608 hwmon_device_unregister(data->hwmon_dev);
2623 sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp); 2609 sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp);
2624 2610
2625 err = i2c_detach_client(client); 2611 kfree(data);
2626 if (!err) 2612 return 0;
2627 kfree(data);
2628 return err;
2629} 2613}
2630 2614
2615static const struct i2c_device_id lm93_id[] = {
2616 { "lm93", lm93 },
2617 { }
2618};
2619MODULE_DEVICE_TABLE(i2c, lm93_id);
2620
2631static struct i2c_driver lm93_driver = { 2621static struct i2c_driver lm93_driver = {
2622 .class = I2C_CLASS_HWMON,
2632 .driver = { 2623 .driver = {
2633 .name = "lm93", 2624 .name = "lm93",
2634 }, 2625 },
2635 .attach_adapter = lm93_attach_adapter, 2626 .probe = lm93_probe,
2636 .detach_client = lm93_detach_client, 2627 .remove = lm93_remove,
2628 .id_table = lm93_id,
2629 .detect = lm93_detect,
2630 .address_data = &addr_data,
2637}; 2631};
2638 2632
2639static int __init lm93_init(void) 2633static int __init lm93_init(void)
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 7e7267a04544..1ab1cacad598 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -79,23 +79,34 @@ I2C_CLIENT_INSMOD_1(max1619);
79 * Functions declaration 79 * Functions declaration
80 */ 80 */
81 81
82static int max1619_attach_adapter(struct i2c_adapter *adapter); 82static int max1619_probe(struct i2c_client *client,
83static int max1619_detect(struct i2c_adapter *adapter, int address, 83 const struct i2c_device_id *id);
84 int kind); 84static int max1619_detect(struct i2c_client *client, int kind,
85 struct i2c_board_info *info);
85static void max1619_init_client(struct i2c_client *client); 86static void max1619_init_client(struct i2c_client *client);
86static int max1619_detach_client(struct i2c_client *client); 87static int max1619_remove(struct i2c_client *client);
87static struct max1619_data *max1619_update_device(struct device *dev); 88static struct max1619_data *max1619_update_device(struct device *dev);
88 89
89/* 90/*
90 * Driver data (common to all clients) 91 * Driver data (common to all clients)
91 */ 92 */
92 93
94static const struct i2c_device_id max1619_id[] = {
95 { "max1619", max1619 },
96 { }
97};
98MODULE_DEVICE_TABLE(i2c, max1619_id);
99
93static struct i2c_driver max1619_driver = { 100static struct i2c_driver max1619_driver = {
101 .class = I2C_CLASS_HWMON,
94 .driver = { 102 .driver = {
95 .name = "max1619", 103 .name = "max1619",
96 }, 104 },
97 .attach_adapter = max1619_attach_adapter, 105 .probe = max1619_probe,
98 .detach_client = max1619_detach_client, 106 .remove = max1619_remove,
107 .id_table = max1619_id,
108 .detect = max1619_detect,
109 .address_data = &addr_data,
99}; 110};
100 111
101/* 112/*
@@ -103,7 +114,6 @@ static struct i2c_driver max1619_driver = {
103 */ 114 */
104 115
105struct max1619_data { 116struct max1619_data {
106 struct i2c_client client;
107 struct device *hwmon_dev; 117 struct device *hwmon_dev;
108 struct mutex update_lock; 118 struct mutex update_lock;
109 char valid; /* zero until following fields are valid */ 119 char valid; /* zero until following fields are valid */
@@ -208,41 +218,15 @@ static const struct attribute_group max1619_group = {
208 * Real code 218 * Real code
209 */ 219 */
210 220
211static int max1619_attach_adapter(struct i2c_adapter *adapter) 221/* Return 0 if detection is successful, -ENODEV otherwise */
212{ 222static int max1619_detect(struct i2c_client *new_client, int kind,
213 if (!(adapter->class & I2C_CLASS_HWMON)) 223 struct i2c_board_info *info)
214 return 0;
215 return i2c_probe(adapter, &addr_data, max1619_detect);
216}
217
218/*
219 * The following function does more than just detection. If detection
220 * succeeds, it also registers the new chip.
221 */
222static int max1619_detect(struct i2c_adapter *adapter, int address, int kind)
223{ 224{
224 struct i2c_client *new_client; 225 struct i2c_adapter *adapter = new_client->adapter;
225 struct max1619_data *data;
226 int err = 0;
227 const char *name = "";
228 u8 reg_config=0, reg_convrate=0, reg_status=0; 226 u8 reg_config=0, reg_convrate=0, reg_status=0;
229 227
230 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 228 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
231 goto exit; 229 return -ENODEV;
232
233 if (!(data = kzalloc(sizeof(struct max1619_data), GFP_KERNEL))) {
234 err = -ENOMEM;
235 goto exit;
236 }
237
238 /* The common I2C client data is placed right before the
239 MAX1619-specific data. */
240 new_client = &data->client;
241 i2c_set_clientdata(new_client, data);
242 new_client->addr = address;
243 new_client->adapter = adapter;
244 new_client->driver = &max1619_driver;
245 new_client->flags = 0;
246 230
247 /* 231 /*
248 * Now we do the remaining detection. A negative kind means that 232 * Now we do the remaining detection. A negative kind means that
@@ -265,8 +249,8 @@ static int max1619_detect(struct i2c_adapter *adapter, int address, int kind)
265 || reg_convrate > 0x07 || (reg_status & 0x61 ) !=0x00) { 249 || reg_convrate > 0x07 || (reg_status & 0x61 ) !=0x00) {
266 dev_dbg(&adapter->dev, 250 dev_dbg(&adapter->dev,
267 "MAX1619 detection failed at 0x%02x.\n", 251 "MAX1619 detection failed at 0x%02x.\n",
268 address); 252 new_client->addr);
269 goto exit_free; 253 return -ENODEV;
270 } 254 }
271 } 255 }
272 256
@@ -285,28 +269,37 @@ static int max1619_detect(struct i2c_adapter *adapter, int address, int kind)
285 dev_info(&adapter->dev, 269 dev_info(&adapter->dev,
286 "Unsupported chip (man_id=0x%02X, " 270 "Unsupported chip (man_id=0x%02X, "
287 "chip_id=0x%02X).\n", man_id, chip_id); 271 "chip_id=0x%02X).\n", man_id, chip_id);
288 goto exit_free; 272 return -ENODEV;
289 } 273 }
290 } 274 }
291 275
292 if (kind == max1619) 276 strlcpy(info->type, "max1619", I2C_NAME_SIZE);
293 name = "max1619"; 277
278 return 0;
279}
280
281static int max1619_probe(struct i2c_client *new_client,
282 const struct i2c_device_id *id)
283{
284 struct max1619_data *data;
285 int err;
286
287 data = kzalloc(sizeof(struct max1619_data), GFP_KERNEL);
288 if (!data) {
289 err = -ENOMEM;
290 goto exit;
291 }
294 292
295 /* We can fill in the remaining client fields */ 293 i2c_set_clientdata(new_client, data);
296 strlcpy(new_client->name, name, I2C_NAME_SIZE);
297 data->valid = 0; 294 data->valid = 0;
298 mutex_init(&data->update_lock); 295 mutex_init(&data->update_lock);
299 296
300 /* Tell the I2C layer a new client has arrived */
301 if ((err = i2c_attach_client(new_client)))
302 goto exit_free;
303
304 /* Initialize the MAX1619 chip */ 297 /* Initialize the MAX1619 chip */
305 max1619_init_client(new_client); 298 max1619_init_client(new_client);
306 299
307 /* Register sysfs hooks */ 300 /* Register sysfs hooks */
308 if ((err = sysfs_create_group(&new_client->dev.kobj, &max1619_group))) 301 if ((err = sysfs_create_group(&new_client->dev.kobj, &max1619_group)))
309 goto exit_detach; 302 goto exit_free;
310 303
311 data->hwmon_dev = hwmon_device_register(&new_client->dev); 304 data->hwmon_dev = hwmon_device_register(&new_client->dev);
312 if (IS_ERR(data->hwmon_dev)) { 305 if (IS_ERR(data->hwmon_dev)) {
@@ -318,8 +311,6 @@ static int max1619_detect(struct i2c_adapter *adapter, int address, int kind)
318 311
319exit_remove_files: 312exit_remove_files:
320 sysfs_remove_group(&new_client->dev.kobj, &max1619_group); 313 sysfs_remove_group(&new_client->dev.kobj, &max1619_group);
321exit_detach:
322 i2c_detach_client(new_client);
323exit_free: 314exit_free:
324 kfree(data); 315 kfree(data);
325exit: 316exit:
@@ -341,17 +332,13 @@ static void max1619_init_client(struct i2c_client *client)
341 config & 0xBF); /* run */ 332 config & 0xBF); /* run */
342} 333}
343 334
344static int max1619_detach_client(struct i2c_client *client) 335static int max1619_remove(struct i2c_client *client)
345{ 336{
346 struct max1619_data *data = i2c_get_clientdata(client); 337 struct max1619_data *data = i2c_get_clientdata(client);
347 int err;
348 338
349 hwmon_device_unregister(data->hwmon_dev); 339 hwmon_device_unregister(data->hwmon_dev);
350 sysfs_remove_group(&client->dev.kobj, &max1619_group); 340 sysfs_remove_group(&client->dev.kobj, &max1619_group);
351 341
352 if ((err = i2c_detach_client(client)))
353 return err;
354
355 kfree(data); 342 kfree(data);
356 return 0; 343 return 0;
357} 344}
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 52d528b76cc3..f27af6a9da41 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -104,22 +104,34 @@ I2C_CLIENT_INSMOD_1(max6650);
104 104
105#define DIV_FROM_REG(reg) (1 << (reg & 7)) 105#define DIV_FROM_REG(reg) (1 << (reg & 7))
106 106
107static int max6650_attach_adapter(struct i2c_adapter *adapter); 107static int max6650_probe(struct i2c_client *client,
108static int max6650_detect(struct i2c_adapter *adapter, int address, int kind); 108 const struct i2c_device_id *id);
109static int max6650_detect(struct i2c_client *client, int kind,
110 struct i2c_board_info *info);
109static int max6650_init_client(struct i2c_client *client); 111static int max6650_init_client(struct i2c_client *client);
110static int max6650_detach_client(struct i2c_client *client); 112static int max6650_remove(struct i2c_client *client);
111static struct max6650_data *max6650_update_device(struct device *dev); 113static struct max6650_data *max6650_update_device(struct device *dev);
112 114
113/* 115/*
114 * Driver data (common to all clients) 116 * Driver data (common to all clients)
115 */ 117 */
116 118
119static const struct i2c_device_id max6650_id[] = {
120 { "max6650", max6650 },
121 { }
122};
123MODULE_DEVICE_TABLE(i2c, max6650_id);
124
117static struct i2c_driver max6650_driver = { 125static struct i2c_driver max6650_driver = {
126 .class = I2C_CLASS_HWMON,
118 .driver = { 127 .driver = {
119 .name = "max6650", 128 .name = "max6650",
120 }, 129 },
121 .attach_adapter = max6650_attach_adapter, 130 .probe = max6650_probe,
122 .detach_client = max6650_detach_client, 131 .remove = max6650_remove,
132 .id_table = max6650_id,
133 .detect = max6650_detect,
134 .address_data = &addr_data,
123}; 135};
124 136
125/* 137/*
@@ -128,7 +140,6 @@ static struct i2c_driver max6650_driver = {
128 140
129struct max6650_data 141struct max6650_data
130{ 142{
131 struct i2c_client client;
132 struct device *hwmon_dev; 143 struct device *hwmon_dev;
133 struct mutex update_lock; 144 struct mutex update_lock;
134 char valid; /* zero until following fields are valid */ 145 char valid; /* zero until following fields are valid */
@@ -437,47 +448,21 @@ static struct attribute_group max6650_attr_grp = {
437 * Real code 448 * Real code
438 */ 449 */
439 450
440static int max6650_attach_adapter(struct i2c_adapter *adapter) 451/* Return 0 if detection is successful, -ENODEV otherwise */
452static int max6650_detect(struct i2c_client *client, int kind,
453 struct i2c_board_info *info)
441{ 454{
442 if (!(adapter->class & I2C_CLASS_HWMON)) { 455 struct i2c_adapter *adapter = client->adapter;
443 dev_dbg(&adapter->dev, 456 int address = client->addr;
444 "FATAL: max6650_attach_adapter class HWMON not set\n");
445 return 0;
446 }
447
448 return i2c_probe(adapter, &addr_data, max6650_detect);
449}
450
451/*
452 * The following function does more than just detection. If detection
453 * succeeds, it also registers the new chip.
454 */
455
456static int max6650_detect(struct i2c_adapter *adapter, int address, int kind)
457{
458 struct i2c_client *client;
459 struct max6650_data *data;
460 int err = -ENODEV;
461 457
462 dev_dbg(&adapter->dev, "max6650_detect called, kind = %d\n", kind); 458 dev_dbg(&adapter->dev, "max6650_detect called, kind = %d\n", kind);
463 459
464 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 460 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
465 dev_dbg(&adapter->dev, "max6650: I2C bus doesn't support " 461 dev_dbg(&adapter->dev, "max6650: I2C bus doesn't support "
466 "byte read mode, skipping.\n"); 462 "byte read mode, skipping.\n");
467 return 0; 463 return -ENODEV;
468 }
469
470 if (!(data = kzalloc(sizeof(struct max6650_data), GFP_KERNEL))) {
471 dev_err(&adapter->dev, "max6650: out of memory.\n");
472 return -ENOMEM;
473 } 464 }
474 465
475 client = &data->client;
476 i2c_set_clientdata(client, data);
477 client->addr = address;
478 client->adapter = adapter;
479 client->driver = &max6650_driver;
480
481 /* 466 /*
482 * Now we do the remaining detection. A negative kind means that 467 * Now we do the remaining detection. A negative kind means that
483 * the driver was loaded with no force parameter (default), so we 468 * the driver was loaded with no force parameter (default), so we
@@ -501,28 +486,40 @@ static int max6650_detect(struct i2c_adapter *adapter, int address, int kind)
501 ||(i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT) & 0xFC))) { 486 ||(i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT) & 0xFC))) {
502 dev_dbg(&adapter->dev, 487 dev_dbg(&adapter->dev,
503 "max6650: detection failed at 0x%02x.\n", address); 488 "max6650: detection failed at 0x%02x.\n", address);
504 goto err_free; 489 return -ENODEV;
505 } 490 }
506 491
507 dev_info(&adapter->dev, "max6650: chip found at 0x%02x.\n", address); 492 dev_info(&adapter->dev, "max6650: chip found at 0x%02x.\n", address);
508 493
509 strlcpy(client->name, "max6650", I2C_NAME_SIZE); 494 strlcpy(info->type, "max6650", I2C_NAME_SIZE);
510 mutex_init(&data->update_lock);
511 495
512 if ((err = i2c_attach_client(client))) { 496 return 0;
513 dev_err(&adapter->dev, "max6650: failed to attach client.\n"); 497}
514 goto err_free; 498
499static int max6650_probe(struct i2c_client *client,
500 const struct i2c_device_id *id)
501{
502 struct max6650_data *data;
503 int err;
504
505 if (!(data = kzalloc(sizeof(struct max6650_data), GFP_KERNEL))) {
506 dev_err(&client->dev, "out of memory.\n");
507 return -ENOMEM;
515 } 508 }
516 509
510 i2c_set_clientdata(client, data);
511 mutex_init(&data->update_lock);
512
517 /* 513 /*
518 * Initialize the max6650 chip 514 * Initialize the max6650 chip
519 */ 515 */
520 if (max6650_init_client(client)) 516 err = max6650_init_client(client);
521 goto err_detach; 517 if (err)
518 goto err_free;
522 519
523 err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp); 520 err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp);
524 if (err) 521 if (err)
525 goto err_detach; 522 goto err_free;
526 523
527 data->hwmon_dev = hwmon_device_register(&client->dev); 524 data->hwmon_dev = hwmon_device_register(&client->dev);
528 if (!IS_ERR(data->hwmon_dev)) 525 if (!IS_ERR(data->hwmon_dev))
@@ -531,24 +528,19 @@ static int max6650_detect(struct i2c_adapter *adapter, int address, int kind)
531 err = PTR_ERR(data->hwmon_dev); 528 err = PTR_ERR(data->hwmon_dev);
532 dev_err(&client->dev, "error registering hwmon device.\n"); 529 dev_err(&client->dev, "error registering hwmon device.\n");
533 sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp); 530 sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
534err_detach:
535 i2c_detach_client(client);
536err_free: 531err_free:
537 kfree(data); 532 kfree(data);
538 return err; 533 return err;
539} 534}
540 535
541static int max6650_detach_client(struct i2c_client *client) 536static int max6650_remove(struct i2c_client *client)
542{ 537{
543 struct max6650_data *data = i2c_get_clientdata(client); 538 struct max6650_data *data = i2c_get_clientdata(client);
544 int err;
545 539
546 sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp); 540 sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
547 hwmon_device_unregister(data->hwmon_dev); 541 hwmon_device_unregister(data->hwmon_dev);
548 err = i2c_detach_client(client); 542 kfree(data);
549 if (!err) 543 return 0;
550 kfree(data);
551 return err;
552} 544}
553 545
554static int max6650_init_client(struct i2c_client *client) 546static int max6650_init_client(struct i2c_client *client)
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 3c9db6598ba7..8bb5cb532d4d 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -96,7 +96,6 @@ static inline int TEMP_FROM_REG(s8 val)
96} 96}
97 97
98struct smsc47m192_data { 98struct smsc47m192_data {
99 struct i2c_client client;
100 struct device *hwmon_dev; 99 struct device *hwmon_dev;
101 struct mutex update_lock; 100 struct mutex update_lock;
102 char valid; /* !=0 if following fields are valid */ 101 char valid; /* !=0 if following fields are valid */
@@ -114,18 +113,29 @@ struct smsc47m192_data {
114 u8 vrm; 113 u8 vrm;
115}; 114};
116 115
117static int smsc47m192_attach_adapter(struct i2c_adapter *adapter); 116static int smsc47m192_probe(struct i2c_client *client,
118static int smsc47m192_detect(struct i2c_adapter *adapter, int address, 117 const struct i2c_device_id *id);
119 int kind); 118static int smsc47m192_detect(struct i2c_client *client, int kind,
120static int smsc47m192_detach_client(struct i2c_client *client); 119 struct i2c_board_info *info);
120static int smsc47m192_remove(struct i2c_client *client);
121static struct smsc47m192_data *smsc47m192_update_device(struct device *dev); 121static struct smsc47m192_data *smsc47m192_update_device(struct device *dev);
122 122
123static const struct i2c_device_id smsc47m192_id[] = {
124 { "smsc47m192", smsc47m192 },
125 { }
126};
127MODULE_DEVICE_TABLE(i2c, smsc47m192_id);
128
123static struct i2c_driver smsc47m192_driver = { 129static struct i2c_driver smsc47m192_driver = {
130 .class = I2C_CLASS_HWMON,
124 .driver = { 131 .driver = {
125 .name = "smsc47m192", 132 .name = "smsc47m192",
126 }, 133 },
127 .attach_adapter = smsc47m192_attach_adapter, 134 .probe = smsc47m192_probe,
128 .detach_client = smsc47m192_detach_client, 135 .remove = smsc47m192_remove,
136 .id_table = smsc47m192_id,
137 .detect = smsc47m192_detect,
138 .address_data = &addr_data,
129}; 139};
130 140
131/* Voltages */ 141/* Voltages */
@@ -440,17 +450,6 @@ static const struct attribute_group smsc47m192_group_in4 = {
440 .attrs = smsc47m192_attributes_in4, 450 .attrs = smsc47m192_attributes_in4,
441}; 451};
442 452
443/* This function is called when:
444 * smsc47m192_driver is inserted (when this module is loaded), for each
445 available adapter
446 * when a new adapter is inserted (and smsc47m192_driver is still present) */
447static int smsc47m192_attach_adapter(struct i2c_adapter *adapter)
448{
449 if (!(adapter->class & I2C_CLASS_HWMON))
450 return 0;
451 return i2c_probe(adapter, &addr_data, smsc47m192_detect);
452}
453
454static void smsc47m192_init_client(struct i2c_client *client) 453static void smsc47m192_init_client(struct i2c_client *client)
455{ 454{
456 int i; 455 int i;
@@ -481,31 +480,15 @@ static void smsc47m192_init_client(struct i2c_client *client)
481 } 480 }
482} 481}
483 482
484/* This function is called by i2c_probe */ 483/* Return 0 if detection is successful, -ENODEV otherwise */
485static int smsc47m192_detect(struct i2c_adapter *adapter, int address, 484static int smsc47m192_detect(struct i2c_client *client, int kind,
486 int kind) 485 struct i2c_board_info *info)
487{ 486{
488 struct i2c_client *client; 487 struct i2c_adapter *adapter = client->adapter;
489 struct smsc47m192_data *data; 488 int version;
490 int err = 0;
491 int version, config;
492 489
493 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 490 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
494 goto exit; 491 return -ENODEV;
495
496 if (!(data = kzalloc(sizeof(struct smsc47m192_data), GFP_KERNEL))) {
497 err = -ENOMEM;
498 goto exit;
499 }
500
501 client = &data->client;
502 i2c_set_clientdata(client, data);
503 client->addr = address;
504 client->adapter = adapter;
505 client->driver = &smsc47m192_driver;
506
507 if (kind == 0)
508 kind = smsc47m192;
509 492
510 /* Detection criteria from sensors_detect script */ 493 /* Detection criteria from sensors_detect script */
511 if (kind < 0) { 494 if (kind < 0) {
@@ -523,26 +506,39 @@ static int smsc47m192_detect(struct i2c_adapter *adapter, int address,
523 } else { 506 } else {
524 dev_dbg(&adapter->dev, 507 dev_dbg(&adapter->dev,
525 "SMSC47M192 detection failed at 0x%02x\n", 508 "SMSC47M192 detection failed at 0x%02x\n",
526 address); 509 client->addr);
527 goto exit_free; 510 return -ENODEV;
528 } 511 }
529 } 512 }
530 513
531 /* Fill in the remaining client fields and put into the global list */ 514 strlcpy(info->type, "smsc47m192", I2C_NAME_SIZE);
532 strlcpy(client->name, "smsc47m192", I2C_NAME_SIZE); 515
516 return 0;
517}
518
519static int smsc47m192_probe(struct i2c_client *client,
520 const struct i2c_device_id *id)
521{
522 struct smsc47m192_data *data;
523 int config;
524 int err;
525
526 data = kzalloc(sizeof(struct smsc47m192_data), GFP_KERNEL);
527 if (!data) {
528 err = -ENOMEM;
529 goto exit;
530 }
531
532 i2c_set_clientdata(client, data);
533 data->vrm = vid_which_vrm(); 533 data->vrm = vid_which_vrm();
534 mutex_init(&data->update_lock); 534 mutex_init(&data->update_lock);
535 535
536 /* Tell the I2C layer a new client has arrived */
537 if ((err = i2c_attach_client(client)))
538 goto exit_free;
539
540 /* Initialize the SMSC47M192 chip */ 536 /* Initialize the SMSC47M192 chip */
541 smsc47m192_init_client(client); 537 smsc47m192_init_client(client);
542 538
543 /* Register sysfs hooks */ 539 /* Register sysfs hooks */
544 if ((err = sysfs_create_group(&client->dev.kobj, &smsc47m192_group))) 540 if ((err = sysfs_create_group(&client->dev.kobj, &smsc47m192_group)))
545 goto exit_detach; 541 goto exit_free;
546 542
547 /* Pin 110 is either in4 (+12V) or VID4 */ 543 /* Pin 110 is either in4 (+12V) or VID4 */
548 config = i2c_smbus_read_byte_data(client, SMSC47M192_REG_CONFIG); 544 config = i2c_smbus_read_byte_data(client, SMSC47M192_REG_CONFIG);
@@ -563,26 +559,20 @@ static int smsc47m192_detect(struct i2c_adapter *adapter, int address,
563exit_remove_files: 559exit_remove_files:
564 sysfs_remove_group(&client->dev.kobj, &smsc47m192_group); 560 sysfs_remove_group(&client->dev.kobj, &smsc47m192_group);
565 sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4); 561 sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4);
566exit_detach:
567 i2c_detach_client(client);
568exit_free: 562exit_free:
569 kfree(data); 563 kfree(data);
570exit: 564exit:
571 return err; 565 return err;
572} 566}
573 567
574static int smsc47m192_detach_client(struct i2c_client *client) 568static int smsc47m192_remove(struct i2c_client *client)
575{ 569{
576 struct smsc47m192_data *data = i2c_get_clientdata(client); 570 struct smsc47m192_data *data = i2c_get_clientdata(client);
577 int err;
578 571
579 hwmon_device_unregister(data->hwmon_dev); 572 hwmon_device_unregister(data->hwmon_dev);
580 sysfs_remove_group(&client->dev.kobj, &smsc47m192_group); 573 sysfs_remove_group(&client->dev.kobj, &smsc47m192_group);
581 sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4); 574 sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4);
582 575
583 if ((err = i2c_detach_client(client)))
584 return err;
585
586 kfree(data); 576 kfree(data);
587 577
588 return 0; 578 return 0;
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 76a3859c3fbe..3b01001108c1 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -60,7 +60,6 @@ static const u8 THMC50_REG_TEMP_MAX[] = { 0x39, 0x37, 0x2B };
60 60
61/* Each client has this additional data */ 61/* Each client has this additional data */
62struct thmc50_data { 62struct thmc50_data {
63 struct i2c_client client;
64 struct device *hwmon_dev; 63 struct device *hwmon_dev;
65 64
66 struct mutex update_lock; 65 struct mutex update_lock;
@@ -77,17 +76,31 @@ struct thmc50_data {
77 u8 alarms; 76 u8 alarms;
78}; 77};
79 78
80static int thmc50_attach_adapter(struct i2c_adapter *adapter); 79static int thmc50_detect(struct i2c_client *client, int kind,
81static int thmc50_detach_client(struct i2c_client *client); 80 struct i2c_board_info *info);
81static int thmc50_probe(struct i2c_client *client,
82 const struct i2c_device_id *id);
83static int thmc50_remove(struct i2c_client *client);
82static void thmc50_init_client(struct i2c_client *client); 84static void thmc50_init_client(struct i2c_client *client);
83static struct thmc50_data *thmc50_update_device(struct device *dev); 85static struct thmc50_data *thmc50_update_device(struct device *dev);
84 86
87static const struct i2c_device_id thmc50_id[] = {
88 { "adm1022", adm1022 },
89 { "thmc50", thmc50 },
90 { }
91};
92MODULE_DEVICE_TABLE(i2c, thmc50_id);
93
85static struct i2c_driver thmc50_driver = { 94static struct i2c_driver thmc50_driver = {
95 .class = I2C_CLASS_HWMON,
86 .driver = { 96 .driver = {
87 .name = "thmc50", 97 .name = "thmc50",
88 }, 98 },
89 .attach_adapter = thmc50_attach_adapter, 99 .probe = thmc50_probe,
90 .detach_client = thmc50_detach_client, 100 .remove = thmc50_remove,
101 .id_table = thmc50_id,
102 .detect = thmc50_detect,
103 .address_data = &addr_data,
91}; 104};
92 105
93static ssize_t show_analog_out(struct device *dev, 106static ssize_t show_analog_out(struct device *dev,
@@ -250,39 +263,23 @@ static const struct attribute_group temp3_group = {
250 .attrs = temp3_attributes, 263 .attrs = temp3_attributes,
251}; 264};
252 265
253static int thmc50_detect(struct i2c_adapter *adapter, int address, int kind) 266/* Return 0 if detection is successful, -ENODEV otherwise */
267static int thmc50_detect(struct i2c_client *client, int kind,
268 struct i2c_board_info *info)
254{ 269{
255 unsigned company; 270 unsigned company;
256 unsigned revision; 271 unsigned revision;
257 unsigned config; 272 unsigned config;
258 struct i2c_client *client; 273 struct i2c_adapter *adapter = client->adapter;
259 struct thmc50_data *data;
260 struct device *dev;
261 int err = 0; 274 int err = 0;
262 const char *type_name; 275 const char *type_name;
263 276
264 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 277 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
265 pr_debug("thmc50: detect failed, " 278 pr_debug("thmc50: detect failed, "
266 "smbus byte data not supported!\n"); 279 "smbus byte data not supported!\n");
267 goto exit; 280 return -ENODEV;
268 }
269
270 /* OK. For now, we presume we have a valid client. We now create the
271 client structure, even though we cannot fill it completely yet.
272 But it allows us to access thmc50 registers. */
273 if (!(data = kzalloc(sizeof(struct thmc50_data), GFP_KERNEL))) {
274 pr_debug("thmc50: detect failed, kzalloc failed!\n");
275 err = -ENOMEM;
276 goto exit;
277 } 281 }
278 282
279 client = &data->client;
280 i2c_set_clientdata(client, data);
281 client->addr = address;
282 client->adapter = adapter;
283 client->driver = &thmc50_driver;
284 dev = &client->dev;
285
286 pr_debug("thmc50: Probing for THMC50 at 0x%2X on bus %d\n", 283 pr_debug("thmc50: Probing for THMC50 at 0x%2X on bus %d\n",
287 client->addr, i2c_adapter_id(client->adapter)); 284 client->addr, i2c_adapter_id(client->adapter));
288 285
@@ -307,21 +304,22 @@ static int thmc50_detect(struct i2c_adapter *adapter, int address, int kind)
307 } 304 }
308 if (err == -ENODEV) { 305 if (err == -ENODEV) {
309 pr_debug("thmc50: Detection of THMC50/ADM1022 failed\n"); 306 pr_debug("thmc50: Detection of THMC50/ADM1022 failed\n");
310 goto exit_free; 307 return err;
311 } 308 }
312 data->type = kind;
313 309
314 if (kind == adm1022) { 310 if (kind == adm1022) {
315 int id = i2c_adapter_id(client->adapter); 311 int id = i2c_adapter_id(client->adapter);
316 int i; 312 int i;
317 313
318 type_name = "adm1022"; 314 type_name = "adm1022";
319 data->has_temp3 = (config >> 7) & 1; /* config MSB */
320 for (i = 0; i + 1 < adm1022_temp3_num; i += 2) 315 for (i = 0; i + 1 < adm1022_temp3_num; i += 2)
321 if (adm1022_temp3[i] == id && 316 if (adm1022_temp3[i] == id &&
322 adm1022_temp3[i + 1] == address) { 317 adm1022_temp3[i + 1] == client->addr) {
323 /* enable 2nd remote temp */ 318 /* enable 2nd remote temp */
324 data->has_temp3 = 1; 319 config |= (1 << 7);
320 i2c_smbus_write_byte_data(client,
321 THMC50_REG_CONF,
322 config);
325 break; 323 break;
326 } 324 }
327 } else { 325 } else {
@@ -330,19 +328,33 @@ static int thmc50_detect(struct i2c_adapter *adapter, int address, int kind)
330 pr_debug("thmc50: Detected %s (version %x, revision %x)\n", 328 pr_debug("thmc50: Detected %s (version %x, revision %x)\n",
331 type_name, (revision >> 4) - 0xc, revision & 0xf); 329 type_name, (revision >> 4) - 0xc, revision & 0xf);
332 330
333 /* Fill in the remaining client fields & put it into the global list */ 331 strlcpy(info->type, type_name, I2C_NAME_SIZE);
334 strlcpy(client->name, type_name, I2C_NAME_SIZE);
335 mutex_init(&data->update_lock);
336 332
337 /* Tell the I2C layer a new client has arrived */ 333 return 0;
338 if ((err = i2c_attach_client(client))) 334}
339 goto exit_free; 335
336static int thmc50_probe(struct i2c_client *client,
337 const struct i2c_device_id *id)
338{
339 struct thmc50_data *data;
340 int err;
341
342 data = kzalloc(sizeof(struct thmc50_data), GFP_KERNEL);
343 if (!data) {
344 pr_debug("thmc50: detect failed, kzalloc failed!\n");
345 err = -ENOMEM;
346 goto exit;
347 }
348
349 i2c_set_clientdata(client, data);
350 data->type = id->driver_data;
351 mutex_init(&data->update_lock);
340 352
341 thmc50_init_client(client); 353 thmc50_init_client(client);
342 354
343 /* Register sysfs hooks */ 355 /* Register sysfs hooks */
344 if ((err = sysfs_create_group(&client->dev.kobj, &thmc50_group))) 356 if ((err = sysfs_create_group(&client->dev.kobj, &thmc50_group)))
345 goto exit_detach; 357 goto exit_free;
346 358
347 /* Register ADM1022 sysfs hooks */ 359 /* Register ADM1022 sysfs hooks */
348 if (data->has_temp3) 360 if (data->has_temp3)
@@ -364,34 +376,21 @@ exit_remove_sysfs:
364 sysfs_remove_group(&client->dev.kobj, &temp3_group); 376 sysfs_remove_group(&client->dev.kobj, &temp3_group);
365exit_remove_sysfs_thmc50: 377exit_remove_sysfs_thmc50:
366 sysfs_remove_group(&client->dev.kobj, &thmc50_group); 378 sysfs_remove_group(&client->dev.kobj, &thmc50_group);
367exit_detach:
368 i2c_detach_client(client);
369exit_free: 379exit_free:
370 kfree(data); 380 kfree(data);
371exit: 381exit:
372 return err; 382 return err;
373} 383}
374 384
375static int thmc50_attach_adapter(struct i2c_adapter *adapter) 385static int thmc50_remove(struct i2c_client *client)
376{
377 if (!(adapter->class & I2C_CLASS_HWMON))
378 return 0;
379 return i2c_probe(adapter, &addr_data, thmc50_detect);
380}
381
382static int thmc50_detach_client(struct i2c_client *client)
383{ 386{
384 struct thmc50_data *data = i2c_get_clientdata(client); 387 struct thmc50_data *data = i2c_get_clientdata(client);
385 int err;
386 388
387 hwmon_device_unregister(data->hwmon_dev); 389 hwmon_device_unregister(data->hwmon_dev);
388 sysfs_remove_group(&client->dev.kobj, &thmc50_group); 390 sysfs_remove_group(&client->dev.kobj, &thmc50_group);
389 if (data->has_temp3) 391 if (data->has_temp3)
390 sysfs_remove_group(&client->dev.kobj, &temp3_group); 392 sysfs_remove_group(&client->dev.kobj, &temp3_group);
391 393
392 if ((err = i2c_detach_client(client)))
393 return err;
394
395 kfree(data); 394 kfree(data);
396 395
397 return 0; 396 return 0;
@@ -412,8 +411,8 @@ static void thmc50_init_client(struct i2c_client *client)
412 } 411 }
413 config = i2c_smbus_read_byte_data(client, THMC50_REG_CONF); 412 config = i2c_smbus_read_byte_data(client, THMC50_REG_CONF);
414 config |= 0x1; /* start the chip if it is in standby mode */ 413 config |= 0x1; /* start the chip if it is in standby mode */
415 if (data->has_temp3) 414 if (data->type == adm1022 && (config & (1 << 7)))
416 config |= 0x80; /* enable 2nd remote temp */ 415 data->has_temp3 = 1;
417 i2c_smbus_write_byte_data(client, THMC50_REG_CONF, config); 416 i2c_smbus_write_byte_data(client, THMC50_REG_CONF, config);
418} 417}
419 418
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 85077c4c8039..e4e91c9d480a 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -247,7 +247,6 @@ static u8 div_to_reg(int nr, long val)
247} 247}
248 248
249struct w83791d_data { 249struct w83791d_data {
250 struct i2c_client client;
251 struct device *hwmon_dev; 250 struct device *hwmon_dev;
252 struct mutex update_lock; 251 struct mutex update_lock;
253 252
@@ -286,9 +285,11 @@ struct w83791d_data {
286 u8 vrm; /* hwmon-vid */ 285 u8 vrm; /* hwmon-vid */
287}; 286};
288 287
289static int w83791d_attach_adapter(struct i2c_adapter *adapter); 288static int w83791d_probe(struct i2c_client *client,
290static int w83791d_detect(struct i2c_adapter *adapter, int address, int kind); 289 const struct i2c_device_id *id);
291static int w83791d_detach_client(struct i2c_client *client); 290static int w83791d_detect(struct i2c_client *client, int kind,
291 struct i2c_board_info *info);
292static int w83791d_remove(struct i2c_client *client);
292 293
293static int w83791d_read(struct i2c_client *client, u8 register); 294static int w83791d_read(struct i2c_client *client, u8 register);
294static int w83791d_write(struct i2c_client *client, u8 register, u8 value); 295static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
@@ -300,12 +301,22 @@ static void w83791d_print_debug(struct w83791d_data *data, struct device *dev);
300 301
301static void w83791d_init_client(struct i2c_client *client); 302static void w83791d_init_client(struct i2c_client *client);
302 303
304static const struct i2c_device_id w83791d_id[] = {
305 { "w83791d", w83791d },
306 { }
307};
308MODULE_DEVICE_TABLE(i2c, w83791d_id);
309
303static struct i2c_driver w83791d_driver = { 310static struct i2c_driver w83791d_driver = {
311 .class = I2C_CLASS_HWMON,
304 .driver = { 312 .driver = {
305 .name = "w83791d", 313 .name = "w83791d",
306 }, 314 },
307 .attach_adapter = w83791d_attach_adapter, 315 .probe = w83791d_probe,
308 .detach_client = w83791d_detach_client, 316 .remove = w83791d_remove,
317 .id_table = w83791d_id,
318 .detect = w83791d_detect,
319 .address_data = &addr_data,
309}; 320};
310 321
311/* following are the sysfs callback functions */ 322/* following are the sysfs callback functions */
@@ -905,49 +916,12 @@ static const struct attribute_group w83791d_group = {
905 .attrs = w83791d_attributes, 916 .attrs = w83791d_attributes,
906}; 917};
907 918
908/* This function is called when:
909 * w83791d_driver is inserted (when this module is loaded), for each
910 available adapter
911 * when a new adapter is inserted (and w83791d_driver is still present) */
912static int w83791d_attach_adapter(struct i2c_adapter *adapter)
913{
914 if (!(adapter->class & I2C_CLASS_HWMON))
915 return 0;
916 return i2c_probe(adapter, &addr_data, w83791d_detect);
917}
918
919 919
920static int w83791d_create_subclient(struct i2c_adapter *adapter, 920static int w83791d_detect_subclients(struct i2c_client *client)
921 struct i2c_client *client, int addr,
922 struct i2c_client **sub_cli)
923{
924 int err;
925 struct i2c_client *sub_client;
926
927 (*sub_cli) = sub_client =
928 kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
929 if (!(sub_client)) {
930 return -ENOMEM;
931 }
932 sub_client->addr = 0x48 + addr;
933 i2c_set_clientdata(sub_client, NULL);
934 sub_client->adapter = adapter;
935 sub_client->driver = &w83791d_driver;
936 strlcpy(sub_client->name, "w83791d subclient", I2C_NAME_SIZE);
937 if ((err = i2c_attach_client(sub_client))) {
938 dev_err(&client->dev, "subclient registration "
939 "at address 0x%x failed\n", sub_client->addr);
940 kfree(sub_client);
941 return err;
942 }
943 return 0;
944}
945
946
947static int w83791d_detect_subclients(struct i2c_adapter *adapter, int address,
948 int kind, struct i2c_client *client)
949{ 921{
922 struct i2c_adapter *adapter = client->adapter;
950 struct w83791d_data *data = i2c_get_clientdata(client); 923 struct w83791d_data *data = i2c_get_clientdata(client);
924 int address = client->addr;
951 int i, id, err; 925 int i, id, err;
952 u8 val; 926 u8 val;
953 927
@@ -971,10 +945,7 @@ static int w83791d_detect_subclients(struct i2c_adapter *adapter, int address,
971 945
972 val = w83791d_read(client, W83791D_REG_I2C_SUBADDR); 946 val = w83791d_read(client, W83791D_REG_I2C_SUBADDR);
973 if (!(val & 0x08)) { 947 if (!(val & 0x08)) {
974 err = w83791d_create_subclient(adapter, client, 948 data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7));
975 val & 0x7, &data->lm75[0]);
976 if (err < 0)
977 goto error_sc_0;
978 } 949 }
979 if (!(val & 0x80)) { 950 if (!(val & 0x80)) {
980 if ((data->lm75[0] != NULL) && 951 if ((data->lm75[0] != NULL) &&
@@ -986,10 +957,8 @@ static int w83791d_detect_subclients(struct i2c_adapter *adapter, int address,
986 err = -ENODEV; 957 err = -ENODEV;
987 goto error_sc_1; 958 goto error_sc_1;
988 } 959 }
989 err = w83791d_create_subclient(adapter, client, 960 data->lm75[1] = i2c_new_dummy(adapter,
990 (val >> 4) & 0x7, &data->lm75[1]); 961 0x48 + ((val >> 4) & 0x7));
991 if (err < 0)
992 goto error_sc_1;
993 } 962 }
994 963
995 return 0; 964 return 0;
@@ -997,53 +966,31 @@ static int w83791d_detect_subclients(struct i2c_adapter *adapter, int address,
997/* Undo inits in case of errors */ 966/* Undo inits in case of errors */
998 967
999error_sc_1: 968error_sc_1:
1000 if (data->lm75[0] != NULL) { 969 if (data->lm75[0] != NULL)
1001 i2c_detach_client(data->lm75[0]); 970 i2c_unregister_device(data->lm75[0]);
1002 kfree(data->lm75[0]);
1003 }
1004error_sc_0: 971error_sc_0:
1005 return err; 972 return err;
1006} 973}
1007 974
1008 975
1009static int w83791d_detect(struct i2c_adapter *adapter, int address, int kind) 976/* Return 0 if detection is successful, -ENODEV otherwise */
977static int w83791d_detect(struct i2c_client *client, int kind,
978 struct i2c_board_info *info)
1010{ 979{
1011 struct i2c_client *client; 980 struct i2c_adapter *adapter = client->adapter;
1012 struct device *dev; 981 int val1, val2;
1013 struct w83791d_data *data; 982 unsigned short address = client->addr;
1014 int i, val1, val2;
1015 int err = 0;
1016 const char *client_name = "";
1017 983
1018 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 984 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
1019 goto error0; 985 return -ENODEV;
1020 } 986 }
1021 987
1022 /* OK. For now, we presume we have a valid client. We now create the
1023 client structure, even though we cannot fill it completely yet.
1024 But it allows us to access w83791d_{read,write}_value. */
1025 if (!(data = kzalloc(sizeof(struct w83791d_data), GFP_KERNEL))) {
1026 err = -ENOMEM;
1027 goto error0;
1028 }
1029
1030 client = &data->client;
1031 dev = &client->dev;
1032 i2c_set_clientdata(client, data);
1033 client->addr = address;
1034 client->adapter = adapter;
1035 client->driver = &w83791d_driver;
1036 mutex_init(&data->update_lock);
1037
1038 /* Now, we do the remaining detection. */
1039
1040 /* The w83791d may be stuck in some other bank than bank 0. This may 988 /* The w83791d may be stuck in some other bank than bank 0. This may
1041 make reading other information impossible. Specify a force=... 989 make reading other information impossible. Specify a force=...
1042 parameter, and the Winbond will be reset to the right bank. */ 990 parameter, and the Winbond will be reset to the right bank. */
1043 if (kind < 0) { 991 if (kind < 0) {
1044 if (w83791d_read(client, W83791D_REG_CONFIG) & 0x80) { 992 if (w83791d_read(client, W83791D_REG_CONFIG) & 0x80) {
1045 dev_dbg(dev, "Detection failed at step 1\n"); 993 return -ENODEV;
1046 goto error1;
1047 } 994 }
1048 val1 = w83791d_read(client, W83791D_REG_BANK); 995 val1 = w83791d_read(client, W83791D_REG_BANK);
1049 val2 = w83791d_read(client, W83791D_REG_CHIPMAN); 996 val2 = w83791d_read(client, W83791D_REG_CHIPMAN);
@@ -1052,15 +999,13 @@ static int w83791d_detect(struct i2c_adapter *adapter, int address, int kind)
1052 /* yes it is Bank0 */ 999 /* yes it is Bank0 */
1053 if (((!(val1 & 0x80)) && (val2 != 0xa3)) || 1000 if (((!(val1 & 0x80)) && (val2 != 0xa3)) ||
1054 ((val1 & 0x80) && (val2 != 0x5c))) { 1001 ((val1 & 0x80) && (val2 != 0x5c))) {
1055 dev_dbg(dev, "Detection failed at step 2\n"); 1002 return -ENODEV;
1056 goto error1;
1057 } 1003 }
1058 } 1004 }
1059 /* If Winbond chip, address of chip and W83791D_REG_I2C_ADDR 1005 /* If Winbond chip, address of chip and W83791D_REG_I2C_ADDR
1060 should match */ 1006 should match */
1061 if (w83791d_read(client, W83791D_REG_I2C_ADDR) != address) { 1007 if (w83791d_read(client, W83791D_REG_I2C_ADDR) != address) {
1062 dev_dbg(dev, "Detection failed at step 3\n"); 1008 return -ENODEV;
1063 goto error1;
1064 } 1009 }
1065 } 1010 }
1066 1011
@@ -1075,30 +1020,33 @@ static int w83791d_detect(struct i2c_adapter *adapter, int address, int kind)
1075 /* get vendor ID */ 1020 /* get vendor ID */
1076 val2 = w83791d_read(client, W83791D_REG_CHIPMAN); 1021 val2 = w83791d_read(client, W83791D_REG_CHIPMAN);
1077 if (val2 != 0x5c) { /* the vendor is NOT Winbond */ 1022 if (val2 != 0x5c) { /* the vendor is NOT Winbond */
1078 dev_dbg(dev, "Detection failed at step 4\n"); 1023 return -ENODEV;
1079 goto error1;
1080 } 1024 }
1081 val1 = w83791d_read(client, W83791D_REG_WCHIPID); 1025 val1 = w83791d_read(client, W83791D_REG_WCHIPID);
1082 if (val1 == 0x71) { 1026 if (val1 == 0x71) {
1083 kind = w83791d; 1027 kind = w83791d;
1084 } else { 1028 } else {
1085 if (kind == 0) 1029 if (kind == 0)
1086 dev_warn(dev, 1030 dev_warn(&adapter->dev,
1087 "w83791d: Ignoring 'force' parameter " 1031 "w83791d: Ignoring 'force' parameter "
1088 "for unknown chip at adapter %d, " 1032 "for unknown chip at adapter %d, "
1089 "address 0x%02x\n", 1033 "address 0x%02x\n",
1090 i2c_adapter_id(adapter), address); 1034 i2c_adapter_id(adapter), address);
1091 goto error1; 1035 return -ENODEV;
1092 } 1036 }
1093 } 1037 }
1094 1038
1095 if (kind == w83791d) { 1039 strlcpy(info->type, "w83791d", I2C_NAME_SIZE);
1096 client_name = "w83791d"; 1040
1097 } else { 1041 return 0;
1098 dev_err(dev, "w83791d: Internal error: unknown kind (%d)?!?\n", 1042}
1099 kind); 1043
1100 goto error1; 1044static int w83791d_probe(struct i2c_client *client,
1101 } 1045 const struct i2c_device_id *id)
1046{
1047 struct w83791d_data *data;
1048 struct device *dev = &client->dev;
1049 int i, val1, err;
1102 1050
1103#ifdef DEBUG 1051#ifdef DEBUG
1104 val1 = w83791d_read(client, W83791D_REG_DID_VID4); 1052 val1 = w83791d_read(client, W83791D_REG_DID_VID4);
@@ -1106,15 +1054,18 @@ static int w83791d_detect(struct i2c_adapter *adapter, int address, int kind)
1106 (val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1); 1054 (val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1);
1107#endif 1055#endif
1108 1056
1109 /* Fill in the remaining client fields and put into the global list */ 1057 data = kzalloc(sizeof(struct w83791d_data), GFP_KERNEL);
1110 strlcpy(client->name, client_name, I2C_NAME_SIZE); 1058 if (!data) {
1059 err = -ENOMEM;
1060 goto error0;
1061 }
1111 1062
1112 /* Tell the I2C layer a new client has arrived */ 1063 i2c_set_clientdata(client, data);
1113 if ((err = i2c_attach_client(client))) 1064 mutex_init(&data->update_lock);
1114 goto error1;
1115 1065
1116 if ((err = w83791d_detect_subclients(adapter, address, kind, client))) 1066 err = w83791d_detect_subclients(client);
1117 goto error2; 1067 if (err)
1068 goto error1;
1118 1069
1119 /* Initialize the chip */ 1070 /* Initialize the chip */
1120 w83791d_init_client(client); 1071 w83791d_init_client(client);
@@ -1141,43 +1092,29 @@ static int w83791d_detect(struct i2c_adapter *adapter, int address, int kind)
1141error4: 1092error4:
1142 sysfs_remove_group(&client->dev.kobj, &w83791d_group); 1093 sysfs_remove_group(&client->dev.kobj, &w83791d_group);
1143error3: 1094error3:
1144 if (data->lm75[0] != NULL) { 1095 if (data->lm75[0] != NULL)
1145 i2c_detach_client(data->lm75[0]); 1096 i2c_unregister_device(data->lm75[0]);
1146 kfree(data->lm75[0]); 1097 if (data->lm75[1] != NULL)
1147 } 1098 i2c_unregister_device(data->lm75[1]);
1148 if (data->lm75[1] != NULL) {
1149 i2c_detach_client(data->lm75[1]);
1150 kfree(data->lm75[1]);
1151 }
1152error2:
1153 i2c_detach_client(client);
1154error1: 1099error1:
1155 kfree(data); 1100 kfree(data);
1156error0: 1101error0:
1157 return err; 1102 return err;
1158} 1103}
1159 1104
1160static int w83791d_detach_client(struct i2c_client *client) 1105static int w83791d_remove(struct i2c_client *client)
1161{ 1106{
1162 struct w83791d_data *data = i2c_get_clientdata(client); 1107 struct w83791d_data *data = i2c_get_clientdata(client);
1163 int err;
1164
1165 /* main client */
1166 if (data) {
1167 hwmon_device_unregister(data->hwmon_dev);
1168 sysfs_remove_group(&client->dev.kobj, &w83791d_group);
1169 }
1170 1108
1171 if ((err = i2c_detach_client(client))) 1109 hwmon_device_unregister(data->hwmon_dev);
1172 return err; 1110 sysfs_remove_group(&client->dev.kobj, &w83791d_group);
1173 1111
1174 /* main client */ 1112 if (data->lm75[0] != NULL)
1175 if (data) 1113 i2c_unregister_device(data->lm75[0]);
1176 kfree(data); 1114 if (data->lm75[1] != NULL)
1177 /* subclient */ 1115 i2c_unregister_device(data->lm75[1]);
1178 else
1179 kfree(client);
1180 1116
1117 kfree(data);
1181 return 0; 1118 return 0;
1182} 1119}
1183 1120
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 299629d47ed6..cf94c5b0c879 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -267,9 +267,7 @@ DIV_TO_REG(long val)
267} 267}
268 268
269struct w83792d_data { 269struct w83792d_data {
270 struct i2c_client client;
271 struct device *hwmon_dev; 270 struct device *hwmon_dev;
272 enum chips type;
273 271
274 struct mutex update_lock; 272 struct mutex update_lock;
275 char valid; /* !=0 if following fields are valid */ 273 char valid; /* !=0 if following fields are valid */
@@ -299,9 +297,11 @@ struct w83792d_data {
299 u8 sf2_levels[3][4]; /* Smart FanII: Fan1,2,3 duty cycle levels */ 297 u8 sf2_levels[3][4]; /* Smart FanII: Fan1,2,3 duty cycle levels */
300}; 298};
301 299
302static int w83792d_attach_adapter(struct i2c_adapter *adapter); 300static int w83792d_probe(struct i2c_client *client,
303static int w83792d_detect(struct i2c_adapter *adapter, int address, int kind); 301 const struct i2c_device_id *id);
304static int w83792d_detach_client(struct i2c_client *client); 302static int w83792d_detect(struct i2c_client *client, int kind,
303 struct i2c_board_info *info);
304static int w83792d_remove(struct i2c_client *client);
305static struct w83792d_data *w83792d_update_device(struct device *dev); 305static struct w83792d_data *w83792d_update_device(struct device *dev);
306 306
307#ifdef DEBUG 307#ifdef DEBUG
@@ -310,12 +310,22 @@ static void w83792d_print_debug(struct w83792d_data *data, struct device *dev);
310 310
311static void w83792d_init_client(struct i2c_client *client); 311static void w83792d_init_client(struct i2c_client *client);
312 312
313static const struct i2c_device_id w83792d_id[] = {
314 { "w83792d", w83792d },
315 { }
316};
317MODULE_DEVICE_TABLE(i2c, w83792d_id);
318
313static struct i2c_driver w83792d_driver = { 319static struct i2c_driver w83792d_driver = {
320 .class = I2C_CLASS_HWMON,
314 .driver = { 321 .driver = {
315 .name = "w83792d", 322 .name = "w83792d",
316 }, 323 },
317 .attach_adapter = w83792d_attach_adapter, 324 .probe = w83792d_probe,
318 .detach_client = w83792d_detach_client, 325 .remove = w83792d_remove,
326 .id_table = w83792d_id,
327 .detect = w83792d_detect,
328 .address_data = &addr_data,
319}; 329};
320 330
321static inline long in_count_from_reg(int nr, struct w83792d_data *data) 331static inline long in_count_from_reg(int nr, struct w83792d_data *data)
@@ -864,53 +874,14 @@ store_sf2_level(struct device *dev, struct device_attribute *attr,
864 return count; 874 return count;
865} 875}
866 876
867/* This function is called when:
868 * w83792d_driver is inserted (when this module is loaded), for each
869 available adapter
870 * when a new adapter is inserted (and w83792d_driver is still present) */
871static int
872w83792d_attach_adapter(struct i2c_adapter *adapter)
873{
874 if (!(adapter->class & I2C_CLASS_HWMON))
875 return 0;
876 return i2c_probe(adapter, &addr_data, w83792d_detect);
877}
878
879
880static int
881w83792d_create_subclient(struct i2c_adapter *adapter,
882 struct i2c_client *new_client, int addr,
883 struct i2c_client **sub_cli)
884{
885 int err;
886 struct i2c_client *sub_client;
887
888 (*sub_cli) = sub_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
889 if (!(sub_client)) {
890 return -ENOMEM;
891 }
892 sub_client->addr = 0x48 + addr;
893 i2c_set_clientdata(sub_client, NULL);
894 sub_client->adapter = adapter;
895 sub_client->driver = &w83792d_driver;
896 sub_client->flags = 0;
897 strlcpy(sub_client->name, "w83792d subclient", I2C_NAME_SIZE);
898 if ((err = i2c_attach_client(sub_client))) {
899 dev_err(&new_client->dev, "subclient registration "
900 "at address 0x%x failed\n", sub_client->addr);
901 kfree(sub_client);
902 return err;
903 }
904 return 0;
905}
906
907 877
908static int 878static int
909w83792d_detect_subclients(struct i2c_adapter *adapter, int address, int kind, 879w83792d_detect_subclients(struct i2c_client *new_client)
910 struct i2c_client *new_client)
911{ 880{
912 int i, id, err; 881 int i, id, err;
882 int address = new_client->addr;
913 u8 val; 883 u8 val;
884 struct i2c_adapter *adapter = new_client->adapter;
914 struct w83792d_data *data = i2c_get_clientdata(new_client); 885 struct w83792d_data *data = i2c_get_clientdata(new_client);
915 886
916 id = i2c_adapter_id(adapter); 887 id = i2c_adapter_id(adapter);
@@ -932,10 +903,7 @@ w83792d_detect_subclients(struct i2c_adapter *adapter, int address, int kind,
932 903
933 val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR); 904 val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR);
934 if (!(val & 0x08)) { 905 if (!(val & 0x08)) {
935 err = w83792d_create_subclient(adapter, new_client, val & 0x7, 906 data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7));
936 &data->lm75[0]);
937 if (err < 0)
938 goto ERROR_SC_0;
939 } 907 }
940 if (!(val & 0x80)) { 908 if (!(val & 0x80)) {
941 if ((data->lm75[0] != NULL) && 909 if ((data->lm75[0] != NULL) &&
@@ -945,10 +913,8 @@ w83792d_detect_subclients(struct i2c_adapter *adapter, int address, int kind,
945 err = -ENODEV; 913 err = -ENODEV;
946 goto ERROR_SC_1; 914 goto ERROR_SC_1;
947 } 915 }
948 err = w83792d_create_subclient(adapter, new_client, 916 data->lm75[1] = i2c_new_dummy(adapter,
949 (val >> 4) & 0x7, &data->lm75[1]); 917 0x48 + ((val >> 4) & 0x7));
950 if (err < 0)
951 goto ERROR_SC_1;
952 } 918 }
953 919
954 return 0; 920 return 0;
@@ -956,10 +922,8 @@ w83792d_detect_subclients(struct i2c_adapter *adapter, int address, int kind,
956/* Undo inits in case of errors */ 922/* Undo inits in case of errors */
957 923
958ERROR_SC_1: 924ERROR_SC_1:
959 if (data->lm75[0] != NULL) { 925 if (data->lm75[0] != NULL)
960 i2c_detach_client(data->lm75[0]); 926 i2c_unregister_device(data->lm75[0]);
961 kfree(data->lm75[0]);
962 }
963ERROR_SC_0: 927ERROR_SC_0:
964 return err; 928 return err;
965} 929}
@@ -1294,47 +1258,25 @@ static const struct attribute_group w83792d_group = {
1294 .attrs = w83792d_attributes, 1258 .attrs = w83792d_attributes,
1295}; 1259};
1296 1260
1261/* Return 0 if detection is successful, -ENODEV otherwise */
1297static int 1262static int
1298w83792d_detect(struct i2c_adapter *adapter, int address, int kind) 1263w83792d_detect(struct i2c_client *client, int kind, struct i2c_board_info *info)
1299{ 1264{
1300 int i = 0, val1 = 0, val2; 1265 struct i2c_adapter *adapter = client->adapter;
1301 struct i2c_client *client; 1266 int val1, val2;
1302 struct device *dev; 1267 unsigned short address = client->addr;
1303 struct w83792d_data *data;
1304 int err = 0;
1305 const char *client_name = "";
1306 1268
1307 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 1269 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
1308 goto ERROR0; 1270 return -ENODEV;
1309 }
1310
1311 /* OK. For now, we presume we have a valid client. We now create the
1312 client structure, even though we cannot fill it completely yet.
1313 But it allows us to access w83792d_{read,write}_value. */
1314
1315 if (!(data = kzalloc(sizeof(struct w83792d_data), GFP_KERNEL))) {
1316 err = -ENOMEM;
1317 goto ERROR0;
1318 } 1271 }
1319 1272
1320 client = &data->client;
1321 dev = &client->dev;
1322 i2c_set_clientdata(client, data);
1323 client->addr = address;
1324 client->adapter = adapter;
1325 client->driver = &w83792d_driver;
1326 client->flags = 0;
1327
1328 /* Now, we do the remaining detection. */
1329
1330 /* The w83792d may be stuck in some other bank than bank 0. This may 1273 /* The w83792d may be stuck in some other bank than bank 0. This may
1331 make reading other information impossible. Specify a force=... or 1274 make reading other information impossible. Specify a force=... or
1332 force_*=... parameter, and the Winbond will be reset to the right 1275 force_*=... parameter, and the Winbond will be reset to the right
1333 bank. */ 1276 bank. */
1334 if (kind < 0) { 1277 if (kind < 0) {
1335 if (w83792d_read_value(client, W83792D_REG_CONFIG) & 0x80) { 1278 if (w83792d_read_value(client, W83792D_REG_CONFIG) & 0x80) {
1336 dev_dbg(dev, "Detection failed at step 1\n"); 1279 return -ENODEV;
1337 goto ERROR1;
1338 } 1280 }
1339 val1 = w83792d_read_value(client, W83792D_REG_BANK); 1281 val1 = w83792d_read_value(client, W83792D_REG_BANK);
1340 val2 = w83792d_read_value(client, W83792D_REG_CHIPMAN); 1282 val2 = w83792d_read_value(client, W83792D_REG_CHIPMAN);
@@ -1342,16 +1284,14 @@ w83792d_detect(struct i2c_adapter *adapter, int address, int kind)
1342 if (!(val1 & 0x07)) { /* is Bank0 */ 1284 if (!(val1 & 0x07)) { /* is Bank0 */
1343 if (((!(val1 & 0x80)) && (val2 != 0xa3)) || 1285 if (((!(val1 & 0x80)) && (val2 != 0xa3)) ||
1344 ((val1 & 0x80) && (val2 != 0x5c))) { 1286 ((val1 & 0x80) && (val2 != 0x5c))) {
1345 dev_dbg(dev, "Detection failed at step 2\n"); 1287 return -ENODEV;
1346 goto ERROR1;
1347 } 1288 }
1348 } 1289 }
1349 /* If Winbond chip, address of chip and W83792D_REG_I2C_ADDR 1290 /* If Winbond chip, address of chip and W83792D_REG_I2C_ADDR
1350 should match */ 1291 should match */
1351 if (w83792d_read_value(client, 1292 if (w83792d_read_value(client,
1352 W83792D_REG_I2C_ADDR) != address) { 1293 W83792D_REG_I2C_ADDR) != address) {
1353 dev_dbg(dev, "Detection failed at step 3\n"); 1294 return -ENODEV;
1354 goto ERROR1;
1355 } 1295 }
1356 } 1296 }
1357 1297
@@ -1367,45 +1307,48 @@ w83792d_detect(struct i2c_adapter *adapter, int address, int kind)
1367 /* get vendor ID */ 1307 /* get vendor ID */
1368 val2 = w83792d_read_value(client, W83792D_REG_CHIPMAN); 1308 val2 = w83792d_read_value(client, W83792D_REG_CHIPMAN);
1369 if (val2 != 0x5c) { /* the vendor is NOT Winbond */ 1309 if (val2 != 0x5c) { /* the vendor is NOT Winbond */
1370 goto ERROR1; 1310 return -ENODEV;
1371 } 1311 }
1372 val1 = w83792d_read_value(client, W83792D_REG_WCHIPID); 1312 val1 = w83792d_read_value(client, W83792D_REG_WCHIPID);
1373 if (val1 == 0x7a) { 1313 if (val1 == 0x7a) {
1374 kind = w83792d; 1314 kind = w83792d;
1375 } else { 1315 } else {
1376 if (kind == 0) 1316 if (kind == 0)
1377 dev_warn(dev, 1317 dev_warn(&adapter->dev,
1378 "w83792d: Ignoring 'force' parameter for" 1318 "w83792d: Ignoring 'force' parameter for"
1379 " unknown chip at adapter %d, address" 1319 " unknown chip at adapter %d, address"
1380 " 0x%02x\n", i2c_adapter_id(adapter), 1320 " 0x%02x\n", i2c_adapter_id(adapter),
1381 address); 1321 address);
1382 goto ERROR1; 1322 return -ENODEV;
1383 } 1323 }
1384 } 1324 }
1385 1325
1386 if (kind == w83792d) { 1326 strlcpy(info->type, "w83792d", I2C_NAME_SIZE);
1387 client_name = "w83792d"; 1327
1388 } else { 1328 return 0;
1389 dev_err(dev, "w83792d: Internal error: unknown kind (%d)?!?\n", 1329}
1390 kind); 1330
1391 goto ERROR1; 1331static int
1392 } 1332w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id)
1333{
1334 struct w83792d_data *data;
1335 struct device *dev = &client->dev;
1336 int i, val1, err;
1393 1337
1394 /* Fill in the remaining client fields and put into the global list */ 1338 data = kzalloc(sizeof(struct w83792d_data), GFP_KERNEL);
1395 strlcpy(client->name, client_name, I2C_NAME_SIZE); 1339 if (!data) {
1396 data->type = kind; 1340 err = -ENOMEM;
1341 goto ERROR0;
1342 }
1397 1343
1344 i2c_set_clientdata(client, data);
1398 data->valid = 0; 1345 data->valid = 0;
1399 mutex_init(&data->update_lock); 1346 mutex_init(&data->update_lock);
1400 1347
1401 /* Tell the I2C layer a new client has arrived */ 1348 err = w83792d_detect_subclients(client);
1402 if ((err = i2c_attach_client(client))) 1349 if (err)
1403 goto ERROR1; 1350 goto ERROR1;
1404 1351
1405 if ((err = w83792d_detect_subclients(adapter, address,
1406 kind, client)))
1407 goto ERROR2;
1408
1409 /* Initialize the chip */ 1352 /* Initialize the chip */
1410 w83792d_init_client(client); 1353 w83792d_init_client(client);
1411 1354
@@ -1457,16 +1400,10 @@ exit_remove_files:
1457 for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++) 1400 for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
1458 sysfs_remove_group(&dev->kobj, &w83792d_group_fan[i]); 1401 sysfs_remove_group(&dev->kobj, &w83792d_group_fan[i]);
1459ERROR3: 1402ERROR3:
1460 if (data->lm75[0] != NULL) { 1403 if (data->lm75[0] != NULL)
1461 i2c_detach_client(data->lm75[0]); 1404 i2c_unregister_device(data->lm75[0]);
1462 kfree(data->lm75[0]); 1405 if (data->lm75[1] != NULL)
1463 } 1406 i2c_unregister_device(data->lm75[1]);
1464 if (data->lm75[1] != NULL) {
1465 i2c_detach_client(data->lm75[1]);
1466 kfree(data->lm75[1]);
1467 }
1468ERROR2:
1469 i2c_detach_client(client);
1470ERROR1: 1407ERROR1:
1471 kfree(data); 1408 kfree(data);
1472ERROR0: 1409ERROR0:
@@ -1474,30 +1411,23 @@ ERROR0:
1474} 1411}
1475 1412
1476static int 1413static int
1477w83792d_detach_client(struct i2c_client *client) 1414w83792d_remove(struct i2c_client *client)
1478{ 1415{
1479 struct w83792d_data *data = i2c_get_clientdata(client); 1416 struct w83792d_data *data = i2c_get_clientdata(client);
1480 int err, i; 1417 int i;
1481
1482 /* main client */
1483 if (data) {
1484 hwmon_device_unregister(data->hwmon_dev);
1485 sysfs_remove_group(&client->dev.kobj, &w83792d_group);
1486 for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
1487 sysfs_remove_group(&client->dev.kobj,
1488 &w83792d_group_fan[i]);
1489 }
1490 1418
1491 if ((err = i2c_detach_client(client))) 1419 hwmon_device_unregister(data->hwmon_dev);
1492 return err; 1420 sysfs_remove_group(&client->dev.kobj, &w83792d_group);
1421 for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
1422 sysfs_remove_group(&client->dev.kobj,
1423 &w83792d_group_fan[i]);
1493 1424
1494 /* main client */ 1425 if (data->lm75[0] != NULL)
1495 if (data) 1426 i2c_unregister_device(data->lm75[0]);
1496 kfree(data); 1427 if (data->lm75[1] != NULL)
1497 /* subclient */ 1428 i2c_unregister_device(data->lm75[1]);
1498 else
1499 kfree(client);
1500 1429
1430 kfree(data);
1501 return 0; 1431 return 0;
1502} 1432}
1503 1433
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index ed3c019b78c7..0a739f1c69be 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -179,7 +179,6 @@ static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
179} 179}
180 180
181struct w83793_data { 181struct w83793_data {
182 struct i2c_client client;
183 struct i2c_client *lm75[2]; 182 struct i2c_client *lm75[2];
184 struct device *hwmon_dev; 183 struct device *hwmon_dev;
185 struct mutex update_lock; 184 struct mutex update_lock;
@@ -226,19 +225,31 @@ struct w83793_data {
226 225
227static u8 w83793_read_value(struct i2c_client *client, u16 reg); 226static u8 w83793_read_value(struct i2c_client *client, u16 reg);
228static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value); 227static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value);
229static int w83793_attach_adapter(struct i2c_adapter *adapter); 228static int w83793_probe(struct i2c_client *client,
230static int w83793_detect(struct i2c_adapter *adapter, int address, int kind); 229 const struct i2c_device_id *id);
231static int w83793_detach_client(struct i2c_client *client); 230static int w83793_detect(struct i2c_client *client, int kind,
231 struct i2c_board_info *info);
232static int w83793_remove(struct i2c_client *client);
232static void w83793_init_client(struct i2c_client *client); 233static void w83793_init_client(struct i2c_client *client);
233static void w83793_update_nonvolatile(struct device *dev); 234static void w83793_update_nonvolatile(struct device *dev);
234static struct w83793_data *w83793_update_device(struct device *dev); 235static struct w83793_data *w83793_update_device(struct device *dev);
235 236
237static const struct i2c_device_id w83793_id[] = {
238 { "w83793", w83793 },
239 { }
240};
241MODULE_DEVICE_TABLE(i2c, w83793_id);
242
236static struct i2c_driver w83793_driver = { 243static struct i2c_driver w83793_driver = {
244 .class = I2C_CLASS_HWMON,
237 .driver = { 245 .driver = {
238 .name = "w83793", 246 .name = "w83793",
239 }, 247 },
240 .attach_adapter = w83793_attach_adapter, 248 .probe = w83793_probe,
241 .detach_client = w83793_detach_client, 249 .remove = w83793_remove,
250 .id_table = w83793_id,
251 .detect = w83793_detect,
252 .address_data = &addr_data,
242}; 253};
243 254
244static ssize_t 255static ssize_t
@@ -1053,89 +1064,51 @@ static void w83793_init_client(struct i2c_client *client)
1053 1064
1054} 1065}
1055 1066
1056static int w83793_attach_adapter(struct i2c_adapter *adapter) 1067static int w83793_remove(struct i2c_client *client)
1057{
1058 if (!(adapter->class & I2C_CLASS_HWMON))
1059 return 0;
1060 return i2c_probe(adapter, &addr_data, w83793_detect);
1061}
1062
1063static int w83793_detach_client(struct i2c_client *client)
1064{ 1068{
1065 struct w83793_data *data = i2c_get_clientdata(client); 1069 struct w83793_data *data = i2c_get_clientdata(client);
1066 struct device *dev = &client->dev; 1070 struct device *dev = &client->dev;
1067 int err, i; 1071 int i;
1068 1072
1069 /* main client */ 1073 hwmon_device_unregister(data->hwmon_dev);
1070 if (data) {
1071 hwmon_device_unregister(data->hwmon_dev);
1072 1074
1073 for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++) 1075 for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++)
1074 device_remove_file(dev, 1076 device_remove_file(dev,
1075 &w83793_sensor_attr_2[i].dev_attr); 1077 &w83793_sensor_attr_2[i].dev_attr);
1076 1078
1077 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) 1079 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++)
1078 device_remove_file(dev, &sda_single_files[i].dev_attr); 1080 device_remove_file(dev, &sda_single_files[i].dev_attr);
1079 1081
1080 for (i = 0; i < ARRAY_SIZE(w83793_vid); i++) 1082 for (i = 0; i < ARRAY_SIZE(w83793_vid); i++)
1081 device_remove_file(dev, &w83793_vid[i].dev_attr); 1083 device_remove_file(dev, &w83793_vid[i].dev_attr);
1082 device_remove_file(dev, &dev_attr_vrm); 1084 device_remove_file(dev, &dev_attr_vrm);
1083 1085
1084 for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++) 1086 for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++)
1085 device_remove_file(dev, &w83793_left_fan[i].dev_attr); 1087 device_remove_file(dev, &w83793_left_fan[i].dev_attr);
1086 1088
1087 for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++) 1089 for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++)
1088 device_remove_file(dev, &w83793_left_pwm[i].dev_attr); 1090 device_remove_file(dev, &w83793_left_pwm[i].dev_attr);
1089 1091
1090 for (i = 0; i < ARRAY_SIZE(w83793_temp); i++) 1092 for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
1091 device_remove_file(dev, &w83793_temp[i].dev_attr); 1093 device_remove_file(dev, &w83793_temp[i].dev_attr);
1092 }
1093 1094
1094 if ((err = i2c_detach_client(client))) 1095 if (data->lm75[0] != NULL)
1095 return err; 1096 i2c_unregister_device(data->lm75[0]);
1097 if (data->lm75[1] != NULL)
1098 i2c_unregister_device(data->lm75[1]);
1096 1099
1097 /* main client */ 1100 kfree(data);
1098 if (data)
1099 kfree(data);
1100 /* subclient */
1101 else
1102 kfree(client);
1103 1101
1104 return 0; 1102 return 0;
1105} 1103}
1106 1104
1107static int 1105static int
1108w83793_create_subclient(struct i2c_adapter *adapter, 1106w83793_detect_subclients(struct i2c_client *client)
1109 struct i2c_client *client, int addr,
1110 struct i2c_client **sub_cli)
1111{
1112 int err = 0;
1113 struct i2c_client *sub_client;
1114
1115 (*sub_cli) = sub_client =
1116 kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
1117 if (!(sub_client)) {
1118 return -ENOMEM;
1119 }
1120 sub_client->addr = 0x48 + addr;
1121 i2c_set_clientdata(sub_client, NULL);
1122 sub_client->adapter = adapter;
1123 sub_client->driver = &w83793_driver;
1124 strlcpy(sub_client->name, "w83793 subclient", I2C_NAME_SIZE);
1125 if ((err = i2c_attach_client(sub_client))) {
1126 dev_err(&client->dev, "subclient registration "
1127 "at address 0x%x failed\n", sub_client->addr);
1128 kfree(sub_client);
1129 }
1130 return err;
1131}
1132
1133static int
1134w83793_detect_subclients(struct i2c_adapter *adapter, int address,
1135 int kind, struct i2c_client *client)
1136{ 1107{
1137 int i, id, err; 1108 int i, id, err;
1109 int address = client->addr;
1138 u8 tmp; 1110 u8 tmp;
1111 struct i2c_adapter *adapter = client->adapter;
1139 struct w83793_data *data = i2c_get_clientdata(client); 1112 struct w83793_data *data = i2c_get_clientdata(client);
1140 1113
1141 id = i2c_adapter_id(adapter); 1114 id = i2c_adapter_id(adapter);
@@ -1158,11 +1131,7 @@ w83793_detect_subclients(struct i2c_adapter *adapter, int address,
1158 1131
1159 tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR); 1132 tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
1160 if (!(tmp & 0x08)) { 1133 if (!(tmp & 0x08)) {
1161 err = 1134 data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (tmp & 0x7));
1162 w83793_create_subclient(adapter, client, tmp & 0x7,
1163 &data->lm75[0]);
1164 if (err < 0)
1165 goto ERROR_SC_0;
1166 } 1135 }
1167 if (!(tmp & 0x80)) { 1136 if (!(tmp & 0x80)) {
1168 if ((data->lm75[0] != NULL) 1137 if ((data->lm75[0] != NULL)
@@ -1173,10 +1142,8 @@ w83793_detect_subclients(struct i2c_adapter *adapter, int address,
1173 err = -ENODEV; 1142 err = -ENODEV;
1174 goto ERROR_SC_1; 1143 goto ERROR_SC_1;
1175 } 1144 }
1176 err = w83793_create_subclient(adapter, client, 1145 data->lm75[1] = i2c_new_dummy(adapter,
1177 (tmp >> 4) & 0x7, &data->lm75[1]); 1146 0x48 + ((tmp >> 4) & 0x7));
1178 if (err < 0)
1179 goto ERROR_SC_1;
1180 } 1147 }
1181 1148
1182 return 0; 1149 return 0;
@@ -1184,69 +1151,44 @@ w83793_detect_subclients(struct i2c_adapter *adapter, int address,
1184 /* Undo inits in case of errors */ 1151 /* Undo inits in case of errors */
1185 1152
1186ERROR_SC_1: 1153ERROR_SC_1:
1187 if (data->lm75[0] != NULL) { 1154 if (data->lm75[0] != NULL)
1188 i2c_detach_client(data->lm75[0]); 1155 i2c_unregister_device(data->lm75[0]);
1189 kfree(data->lm75[0]);
1190 }
1191ERROR_SC_0: 1156ERROR_SC_0:
1192 return err; 1157 return err;
1193} 1158}
1194 1159
1195static int w83793_detect(struct i2c_adapter *adapter, int address, int kind) 1160/* Return 0 if detection is successful, -ENODEV otherwise */
1161static int w83793_detect(struct i2c_client *client, int kind,
1162 struct i2c_board_info *info)
1196{ 1163{
1197 int i; 1164 u8 tmp, bank;
1198 u8 tmp, val; 1165 struct i2c_adapter *adapter = client->adapter;
1199 struct i2c_client *client; 1166 unsigned short address = client->addr;
1200 struct device *dev;
1201 struct w83793_data *data;
1202 int files_fan = ARRAY_SIZE(w83793_left_fan) / 7;
1203 int files_pwm = ARRAY_SIZE(w83793_left_pwm) / 5;
1204 int files_temp = ARRAY_SIZE(w83793_temp) / 6;
1205 int err = 0;
1206 1167
1207 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 1168 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
1208 goto exit; 1169 return -ENODEV;
1209 } 1170 }
1210 1171
1211 /* OK. For now, we presume we have a valid client. We now create the 1172 bank = i2c_smbus_read_byte_data(client, W83793_REG_BANKSEL);
1212 client structure, even though we cannot fill it completely yet.
1213 But it allows us to access w83793_{read,write}_value. */
1214
1215 if (!(data = kzalloc(sizeof(struct w83793_data), GFP_KERNEL))) {
1216 err = -ENOMEM;
1217 goto exit;
1218 }
1219
1220 client = &data->client;
1221 dev = &client->dev;
1222 i2c_set_clientdata(client, data);
1223 client->addr = address;
1224 client->adapter = adapter;
1225 client->driver = &w83793_driver;
1226 1173
1227 data->bank = i2c_smbus_read_byte_data(client, W83793_REG_BANKSEL);
1228
1229 /* Now, we do the remaining detection. */
1230 if (kind < 0) { 1174 if (kind < 0) {
1231 tmp = data->bank & 0x80 ? 0x5c : 0xa3; 1175 tmp = bank & 0x80 ? 0x5c : 0xa3;
1232 /* Check Winbond vendor ID */ 1176 /* Check Winbond vendor ID */
1233 if (tmp != i2c_smbus_read_byte_data(client, 1177 if (tmp != i2c_smbus_read_byte_data(client,
1234 W83793_REG_VENDORID)) { 1178 W83793_REG_VENDORID)) {
1235 pr_debug("w83793: Detection failed at check " 1179 pr_debug("w83793: Detection failed at check "
1236 "vendor id\n"); 1180 "vendor id\n");
1237 err = -ENODEV; 1181 return -ENODEV;
1238 goto free_mem;
1239 } 1182 }
1240 1183
1241 /* If Winbond chip, address of chip and W83793_REG_I2C_ADDR 1184 /* If Winbond chip, address of chip and W83793_REG_I2C_ADDR
1242 should match */ 1185 should match */
1243 if ((data->bank & 0x07) == 0 1186 if ((bank & 0x07) == 0
1244 && i2c_smbus_read_byte_data(client, W83793_REG_I2C_ADDR) != 1187 && i2c_smbus_read_byte_data(client, W83793_REG_I2C_ADDR) !=
1245 (address << 1)) { 1188 (address << 1)) {
1246 pr_debug("w83793: Detection failed at check " 1189 pr_debug("w83793: Detection failed at check "
1247 "i2c addr\n"); 1190 "i2c addr\n");
1248 err = -ENODEV; 1191 return -ENODEV;
1249 goto free_mem;
1250 } 1192 }
1251 1193
1252 } 1194 }
@@ -1255,30 +1197,47 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
1255 Winbond. Determine the chip type now */ 1197 Winbond. Determine the chip type now */
1256 1198
1257 if (kind <= 0) { 1199 if (kind <= 0) {
1258 if (0x7b == w83793_read_value(client, W83793_REG_CHIPID)) { 1200 if (0x7b == i2c_smbus_read_byte_data(client,
1201 W83793_REG_CHIPID)) {
1259 kind = w83793; 1202 kind = w83793;
1260 } else { 1203 } else {
1261 if (kind == 0) 1204 if (kind == 0)
1262 dev_warn(&adapter->dev, "w83793: Ignoring " 1205 dev_warn(&adapter->dev, "w83793: Ignoring "
1263 "'force' parameter for unknown chip " 1206 "'force' parameter for unknown chip "
1264 "at address 0x%02x\n", address); 1207 "at address 0x%02x\n", address);
1265 err = -ENODEV; 1208 return -ENODEV;
1266 goto free_mem;
1267 } 1209 }
1268 } 1210 }
1269 1211
1270 /* Fill in the remaining client fields and put into the global list */ 1212 strlcpy(info->type, "w83793", I2C_NAME_SIZE);
1271 strlcpy(client->name, "w83793", I2C_NAME_SIZE); 1213
1214 return 0;
1215}
1272 1216
1217static int w83793_probe(struct i2c_client *client,
1218 const struct i2c_device_id *id)
1219{
1220 struct device *dev = &client->dev;
1221 struct w83793_data *data;
1222 int i, tmp, val, err;
1223 int files_fan = ARRAY_SIZE(w83793_left_fan) / 7;
1224 int files_pwm = ARRAY_SIZE(w83793_left_pwm) / 5;
1225 int files_temp = ARRAY_SIZE(w83793_temp) / 6;
1226
1227 data = kzalloc(sizeof(struct w83793_data), GFP_KERNEL);
1228 if (!data) {
1229 err = -ENOMEM;
1230 goto exit;
1231 }
1232
1233 i2c_set_clientdata(client, data);
1234 data->bank = i2c_smbus_read_byte_data(client, W83793_REG_BANKSEL);
1273 mutex_init(&data->update_lock); 1235 mutex_init(&data->update_lock);
1274 1236
1275 /* Tell the I2C layer a new client has arrived */ 1237 err = w83793_detect_subclients(client);
1276 if ((err = i2c_attach_client(client))) 1238 if (err)
1277 goto free_mem; 1239 goto free_mem;
1278 1240
1279 if ((err = w83793_detect_subclients(adapter, address, kind, client)))
1280 goto detach_client;
1281
1282 /* Initialize the chip */ 1241 /* Initialize the chip */
1283 w83793_init_client(client); 1242 w83793_init_client(client);
1284 1243
@@ -1459,16 +1418,10 @@ exit_remove:
1459 for (i = 0; i < ARRAY_SIZE(w83793_temp); i++) 1418 for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
1460 device_remove_file(dev, &w83793_temp[i].dev_attr); 1419 device_remove_file(dev, &w83793_temp[i].dev_attr);
1461 1420
1462 if (data->lm75[0] != NULL) { 1421 if (data->lm75[0] != NULL)
1463 i2c_detach_client(data->lm75[0]); 1422 i2c_unregister_device(data->lm75[0]);
1464 kfree(data->lm75[0]); 1423 if (data->lm75[1] != NULL)
1465 } 1424 i2c_unregister_device(data->lm75[1]);
1466 if (data->lm75[1] != NULL) {
1467 i2c_detach_client(data->lm75[1]);
1468 kfree(data->lm75[1]);
1469 }
1470detach_client:
1471 i2c_detach_client(client);
1472free_mem: 1425free_mem:
1473 kfree(data); 1426 kfree(data);
1474exit: 1427exit:
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 52e268e25dab..ea295b9fc4f4 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -81,10 +81,11 @@ I2C_CLIENT_INSMOD_1(w83l785ts);
81 * Functions declaration 81 * Functions declaration
82 */ 82 */
83 83
84static int w83l785ts_attach_adapter(struct i2c_adapter *adapter); 84static int w83l785ts_probe(struct i2c_client *client,
85static int w83l785ts_detect(struct i2c_adapter *adapter, int address, 85 const struct i2c_device_id *id);
86 int kind); 86static int w83l785ts_detect(struct i2c_client *client, int kind,
87static int w83l785ts_detach_client(struct i2c_client *client); 87 struct i2c_board_info *info);
88static int w83l785ts_remove(struct i2c_client *client);
88static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval); 89static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval);
89static struct w83l785ts_data *w83l785ts_update_device(struct device *dev); 90static struct w83l785ts_data *w83l785ts_update_device(struct device *dev);
90 91
@@ -92,12 +93,22 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev);
92 * Driver data (common to all clients) 93 * Driver data (common to all clients)
93 */ 94 */
94 95
96static const struct i2c_device_id w83l785ts_id[] = {
97 { "w83l785ts", w83l785ts },
98 { }
99};
100MODULE_DEVICE_TABLE(i2c, w83l785ts_id);
101
95static struct i2c_driver w83l785ts_driver = { 102static struct i2c_driver w83l785ts_driver = {
103 .class = I2C_CLASS_HWMON,
96 .driver = { 104 .driver = {
97 .name = "w83l785ts", 105 .name = "w83l785ts",
98 }, 106 },
99 .attach_adapter = w83l785ts_attach_adapter, 107 .probe = w83l785ts_probe,
100 .detach_client = w83l785ts_detach_client, 108 .remove = w83l785ts_remove,
109 .id_table = w83l785ts_id,
110 .detect = w83l785ts_detect,
111 .address_data = &addr_data,
101}; 112};
102 113
103/* 114/*
@@ -105,7 +116,6 @@ static struct i2c_driver w83l785ts_driver = {
105 */ 116 */
106 117
107struct w83l785ts_data { 118struct w83l785ts_data {
108 struct i2c_client client;
109 struct device *hwmon_dev; 119 struct device *hwmon_dev;
110 struct mutex update_lock; 120 struct mutex update_lock;
111 char valid; /* zero until following fields are valid */ 121 char valid; /* zero until following fields are valid */
@@ -135,40 +145,14 @@ static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL, 1);
135 * Real code 145 * Real code
136 */ 146 */
137 147
138static int w83l785ts_attach_adapter(struct i2c_adapter *adapter) 148/* Return 0 if detection is successful, -ENODEV otherwise */
139{ 149static int w83l785ts_detect(struct i2c_client *new_client, int kind,
140 if (!(adapter->class & I2C_CLASS_HWMON)) 150 struct i2c_board_info *info)
141 return 0;
142 return i2c_probe(adapter, &addr_data, w83l785ts_detect);
143}
144
145/*
146 * The following function does more than just detection. If detection
147 * succeeds, it also registers the new chip.
148 */
149static int w83l785ts_detect(struct i2c_adapter *adapter, int address, int kind)
150{ 151{
151 struct i2c_client *new_client; 152 struct i2c_adapter *adapter = new_client->adapter;
152 struct w83l785ts_data *data;
153 int err = 0;
154
155 153
156 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 154 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
157 goto exit; 155 return -ENODEV;
158
159 if (!(data = kzalloc(sizeof(struct w83l785ts_data), GFP_KERNEL))) {
160 err = -ENOMEM;
161 goto exit;
162 }
163
164 /* The common I2C client data is placed right before the
165 * W83L785TS-specific data. */
166 new_client = &data->client;
167 i2c_set_clientdata(new_client, data);
168 new_client->addr = address;
169 new_client->adapter = adapter;
170 new_client->driver = &w83l785ts_driver;
171 new_client->flags = 0;
172 156
173 /* 157 /*
174 * Now we do the remaining detection. A negative kind means that 158 * Now we do the remaining detection. A negative kind means that
@@ -188,8 +172,8 @@ static int w83l785ts_detect(struct i2c_adapter *adapter, int address, int kind)
188 W83L785TS_REG_TYPE, 0) & 0xFC) != 0x00)) { 172 W83L785TS_REG_TYPE, 0) & 0xFC) != 0x00)) {
189 dev_dbg(&adapter->dev, 173 dev_dbg(&adapter->dev,
190 "W83L785TS-S detection failed at 0x%02x.\n", 174 "W83L785TS-S detection failed at 0x%02x.\n",
191 address); 175 new_client->addr);
192 goto exit_free; 176 return -ENODEV;
193 } 177 }
194 } 178 }
195 179
@@ -214,22 +198,34 @@ static int w83l785ts_detect(struct i2c_adapter *adapter, int address, int kind)
214 dev_info(&adapter->dev, 198 dev_info(&adapter->dev,
215 "Unsupported chip (man_id=0x%04X, " 199 "Unsupported chip (man_id=0x%04X, "
216 "chip_id=0x%02X).\n", man_id, chip_id); 200 "chip_id=0x%02X).\n", man_id, chip_id);
217 goto exit_free; 201 return -ENODEV;
218 } 202 }
219 } 203 }
220 204
221 /* We can fill in the remaining client fields. */ 205 strlcpy(info->type, "w83l785ts", I2C_NAME_SIZE);
222 strlcpy(new_client->name, "w83l785ts", I2C_NAME_SIZE); 206
207 return 0;
208}
209
210static int w83l785ts_probe(struct i2c_client *new_client,
211 const struct i2c_device_id *id)
212{
213 struct w83l785ts_data *data;
214 int err = 0;
215
216 data = kzalloc(sizeof(struct w83l785ts_data), GFP_KERNEL);
217 if (!data) {
218 err = -ENOMEM;
219 goto exit;
220 }
221
222 i2c_set_clientdata(new_client, data);
223 data->valid = 0; 223 data->valid = 0;
224 mutex_init(&data->update_lock); 224 mutex_init(&data->update_lock);
225 225
226 /* Default values in case the first read fails (unlikely). */ 226 /* Default values in case the first read fails (unlikely). */
227 data->temp[1] = data->temp[0] = 0; 227 data->temp[1] = data->temp[0] = 0;
228 228
229 /* Tell the I2C layer a new client has arrived. */
230 if ((err = i2c_attach_client(new_client)))
231 goto exit_free;
232
233 /* 229 /*
234 * Initialize the W83L785TS chip 230 * Initialize the W83L785TS chip
235 * Nothing yet, assume it is already started. 231 * Nothing yet, assume it is already started.
@@ -259,25 +255,20 @@ exit_remove:
259 &sensor_dev_attr_temp1_input.dev_attr); 255 &sensor_dev_attr_temp1_input.dev_attr);
260 device_remove_file(&new_client->dev, 256 device_remove_file(&new_client->dev,
261 &sensor_dev_attr_temp1_max.dev_attr); 257 &sensor_dev_attr_temp1_max.dev_attr);
262 i2c_detach_client(new_client);
263exit_free:
264 kfree(data); 258 kfree(data);
265exit: 259exit:
266 return err; 260 return err;
267} 261}
268 262
269static int w83l785ts_detach_client(struct i2c_client *client) 263static int w83l785ts_remove(struct i2c_client *client)
270{ 264{
271 struct w83l785ts_data *data = i2c_get_clientdata(client); 265 struct w83l785ts_data *data = i2c_get_clientdata(client);
272 int err;
273 266
274 hwmon_device_unregister(data->hwmon_dev); 267 hwmon_device_unregister(data->hwmon_dev);
275 device_remove_file(&client->dev, 268 device_remove_file(&client->dev,
276 &sensor_dev_attr_temp1_input.dev_attr); 269 &sensor_dev_attr_temp1_input.dev_attr);
277 device_remove_file(&client->dev, 270 device_remove_file(&client->dev,
278 &sensor_dev_attr_temp1_max.dev_attr); 271 &sensor_dev_attr_temp1_max.dev_attr);
279 if ((err = i2c_detach_client(client)))
280 return err;
281 272
282 kfree(data); 273 kfree(data);
283 return 0; 274 return 0;
@@ -286,6 +277,18 @@ static int w83l785ts_detach_client(struct i2c_client *client)
286static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval) 277static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval)
287{ 278{
288 int value, i; 279 int value, i;
280 struct device *dev;
281 const char *prefix;
282
283 /* We might be called during detection, at which point the client
284 isn't yet fully initialized, so we can't use dev_dbg on it */
285 if (i2c_get_clientdata(client)) {
286 dev = &client->dev;
287 prefix = "";
288 } else {
289 dev = &client->adapter->dev;
290 prefix = "w83l785ts: ";
291 }
289 292
290 /* Frequent read errors have been reported on Asus boards, so we 293 /* Frequent read errors have been reported on Asus boards, so we
291 * retry on read errors. If it still fails (unlikely), return the 294 * retry on read errors. If it still fails (unlikely), return the
@@ -293,15 +296,15 @@ static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval)
293 for (i = 1; i <= MAX_RETRIES; i++) { 296 for (i = 1; i <= MAX_RETRIES; i++) {
294 value = i2c_smbus_read_byte_data(client, reg); 297 value = i2c_smbus_read_byte_data(client, reg);
295 if (value >= 0) { 298 if (value >= 0) {
296 dev_dbg(&client->dev, "Read 0x%02x from register " 299 dev_dbg(dev, "%sRead 0x%02x from register 0x%02x.\n",
297 "0x%02x.\n", value, reg); 300 prefix, value, reg);
298 return value; 301 return value;
299 } 302 }
300 dev_dbg(&client->dev, "Read failed, will retry in %d.\n", i); 303 dev_dbg(dev, "%sRead failed, will retry in %d.\n", prefix, i);
301 msleep(i); 304 msleep(i);
302 } 305 }
303 306
304 dev_err(&client->dev, "Couldn't read value from register 0x%02x.\n", 307 dev_err(dev, "%sCouldn't read value from register 0x%02x.\n", prefix,
305 reg); 308 reg);
306 return defval; 309 return defval;
307} 310}
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 41e22ddb568a..badca769f350 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -121,7 +121,6 @@ DIV_TO_REG(long val)
121} 121}
122 122
123struct w83l786ng_data { 123struct w83l786ng_data {
124 struct i2c_client client;
125 struct device *hwmon_dev; 124 struct device *hwmon_dev;
126 struct mutex update_lock; 125 struct mutex update_lock;
127 char valid; /* !=0 if following fields are valid */ 126 char valid; /* !=0 if following fields are valid */
@@ -146,18 +145,30 @@ struct w83l786ng_data {
146 u8 tolerance[2]; 145 u8 tolerance[2];
147}; 146};
148 147
149static int w83l786ng_attach_adapter(struct i2c_adapter *adapter); 148static int w83l786ng_probe(struct i2c_client *client,
150static int w83l786ng_detect(struct i2c_adapter *adapter, int address, int kind); 149 const struct i2c_device_id *id);
151static int w83l786ng_detach_client(struct i2c_client *client); 150static int w83l786ng_detect(struct i2c_client *client, int kind,
151 struct i2c_board_info *info);
152static int w83l786ng_remove(struct i2c_client *client);
152static void w83l786ng_init_client(struct i2c_client *client); 153static void w83l786ng_init_client(struct i2c_client *client);
153static struct w83l786ng_data *w83l786ng_update_device(struct device *dev); 154static struct w83l786ng_data *w83l786ng_update_device(struct device *dev);
154 155
156static const struct i2c_device_id w83l786ng_id[] = {
157 { "w83l786ng", w83l786ng },
158 { }
159};
160MODULE_DEVICE_TABLE(i2c, w83l786ng_id);
161
155static struct i2c_driver w83l786ng_driver = { 162static struct i2c_driver w83l786ng_driver = {
163 .class = I2C_CLASS_HWMON,
156 .driver = { 164 .driver = {
157 .name = "w83l786ng", 165 .name = "w83l786ng",
158 }, 166 },
159 .attach_adapter = w83l786ng_attach_adapter, 167 .probe = w83l786ng_probe,
160 .detach_client = w83l786ng_detach_client, 168 .remove = w83l786ng_remove,
169 .id_table = w83l786ng_id,
170 .detect = w83l786ng_detect,
171 .address_data = &addr_data,
161}; 172};
162 173
163static u8 174static u8
@@ -575,42 +586,15 @@ static const struct attribute_group w83l786ng_group = {
575}; 586};
576 587
577static int 588static int
578w83l786ng_attach_adapter(struct i2c_adapter *adapter) 589w83l786ng_detect(struct i2c_client *client, int kind,
590 struct i2c_board_info *info)
579{ 591{
580 if (!(adapter->class & I2C_CLASS_HWMON)) 592 struct i2c_adapter *adapter = client->adapter;
581 return 0;
582 return i2c_probe(adapter, &addr_data, w83l786ng_detect);
583}
584
585static int
586w83l786ng_detect(struct i2c_adapter *adapter, int address, int kind)
587{
588 struct i2c_client *client;
589 struct device *dev;
590 struct w83l786ng_data *data;
591 int i, err = 0;
592 u8 reg_tmp;
593 593
594 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 594 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
595 goto exit; 595 return -ENODEV;
596 }
597
598 /* OK. For now, we presume we have a valid client. We now create the
599 client structure, even though we cannot fill it completely yet.
600 But it allows us to access w83l786ng_{read,write}_value. */
601
602 if (!(data = kzalloc(sizeof(struct w83l786ng_data), GFP_KERNEL))) {
603 err = -ENOMEM;
604 goto exit;
605 } 596 }
606 597
607 client = &data->client;
608 dev = &client->dev;
609 i2c_set_clientdata(client, data);
610 client->addr = address;
611 client->adapter = adapter;
612 client->driver = &w83l786ng_driver;
613
614 /* 598 /*
615 * Now we do the remaining detection. A negative kind means that 599 * Now we do the remaining detection. A negative kind means that
616 * the driver was loaded with no force parameter (default), so we 600 * the driver was loaded with no force parameter (default), so we
@@ -627,8 +611,8 @@ w83l786ng_detect(struct i2c_adapter *adapter, int address, int kind)
627 W83L786NG_REG_CONFIG) & 0x80) != 0x00)) { 611 W83L786NG_REG_CONFIG) & 0x80) != 0x00)) {
628 dev_dbg(&adapter->dev, 612 dev_dbg(&adapter->dev,
629 "W83L786NG detection failed at 0x%02x.\n", 613 "W83L786NG detection failed at 0x%02x.\n",
630 address); 614 client->addr);
631 goto exit_free; 615 return -ENODEV;
632 } 616 }
633 } 617 }
634 618
@@ -651,17 +635,31 @@ w83l786ng_detect(struct i2c_adapter *adapter, int address, int kind)
651 dev_info(&adapter->dev, 635 dev_info(&adapter->dev,
652 "Unsupported chip (man_id=0x%04X, " 636 "Unsupported chip (man_id=0x%04X, "
653 "chip_id=0x%02X).\n", man_id, chip_id); 637 "chip_id=0x%02X).\n", man_id, chip_id);
654 goto exit_free; 638 return -ENODEV;
655 } 639 }
656 } 640 }
657 641
658 /* Fill in the remaining client fields and put into the global list */ 642 strlcpy(info->type, "w83l786ng", I2C_NAME_SIZE);
659 strlcpy(client->name, "w83l786ng", I2C_NAME_SIZE);
660 mutex_init(&data->update_lock);
661 643
662 /* Tell the I2C layer a new client has arrived */ 644 return 0;
663 if ((err = i2c_attach_client(client))) 645}
664 goto exit_free; 646
647static int
648w83l786ng_probe(struct i2c_client *client, const struct i2c_device_id *id)
649{
650 struct device *dev = &client->dev;
651 struct w83l786ng_data *data;
652 int i, err = 0;
653 u8 reg_tmp;
654
655 data = kzalloc(sizeof(struct w83l786ng_data), GFP_KERNEL);
656 if (!data) {
657 err = -ENOMEM;
658 goto exit;
659 }
660
661 i2c_set_clientdata(client, data);
662 mutex_init(&data->update_lock);
665 663
666 /* Initialize the chip */ 664 /* Initialize the chip */
667 w83l786ng_init_client(client); 665 w83l786ng_init_client(client);
@@ -693,25 +691,19 @@ w83l786ng_detect(struct i2c_adapter *adapter, int address, int kind)
693 691
694exit_remove: 692exit_remove:
695 sysfs_remove_group(&client->dev.kobj, &w83l786ng_group); 693 sysfs_remove_group(&client->dev.kobj, &w83l786ng_group);
696 i2c_detach_client(client);
697exit_free:
698 kfree(data); 694 kfree(data);
699exit: 695exit:
700 return err; 696 return err;
701} 697}
702 698
703static int 699static int
704w83l786ng_detach_client(struct i2c_client *client) 700w83l786ng_remove(struct i2c_client *client)
705{ 701{
706 struct w83l786ng_data *data = i2c_get_clientdata(client); 702 struct w83l786ng_data *data = i2c_get_clientdata(client);
707 int err;
708 703
709 hwmon_device_unregister(data->hwmon_dev); 704 hwmon_device_unregister(data->hwmon_dev);
710 sysfs_remove_group(&client->dev.kobj, &w83l786ng_group); 705 sysfs_remove_group(&client->dev.kobj, &w83l786ng_group);
711 706
712 if ((err = i2c_detach_client(client)))
713 return err;
714
715 kfree(data); 707 kfree(data);
716 708
717 return 0; 709 return 0;
diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
index 2f150e33c74c..72872d1e63ef 100644
--- a/drivers/i2c/busses/i2c-amd756-s4882.c
+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
@@ -155,6 +155,16 @@ static int __init amd756_s4882_init(void)
155 int i, error; 155 int i, error;
156 union i2c_smbus_data ioconfig; 156 union i2c_smbus_data ioconfig;
157 157
158 /* Configure the PCA9556 multiplexer */
159 ioconfig.byte = 0x00; /* All I/O to output mode */
160 error = i2c_smbus_xfer(&amd756_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03,
161 I2C_SMBUS_BYTE_DATA, &ioconfig);
162 if (error) {
163 dev_err(&amd756_smbus.dev, "PCA9556 configuration failed\n");
164 error = -EIO;
165 goto ERROR0;
166 }
167
158 /* Unregister physical bus */ 168 /* Unregister physical bus */
159 error = i2c_del_adapter(&amd756_smbus); 169 error = i2c_del_adapter(&amd756_smbus);
160 if (error) { 170 if (error) {
@@ -198,22 +208,11 @@ static int __init amd756_s4882_init(void)
198 s4882_algo[3].smbus_xfer = amd756_access_virt3; 208 s4882_algo[3].smbus_xfer = amd756_access_virt3;
199 s4882_algo[4].smbus_xfer = amd756_access_virt4; 209 s4882_algo[4].smbus_xfer = amd756_access_virt4;
200 210
201 /* Configure the PCA9556 multiplexer */
202 ioconfig.byte = 0x00; /* All I/O to output mode */
203 error = amd756_smbus.algo->smbus_xfer(&amd756_smbus, 0x18, 0,
204 I2C_SMBUS_WRITE, 0x03,
205 I2C_SMBUS_BYTE_DATA, &ioconfig);
206 if (error) {
207 dev_err(&amd756_smbus.dev, "PCA9556 configuration failed\n");
208 error = -EIO;
209 goto ERROR3;
210 }
211
212 /* Register virtual adapters */ 211 /* Register virtual adapters */
213 for (i = 0; i < 5; i++) { 212 for (i = 0; i < 5; i++) {
214 error = i2c_add_adapter(s4882_adapter+i); 213 error = i2c_add_adapter(s4882_adapter+i);
215 if (error) { 214 if (error) {
216 dev_err(&amd756_smbus.dev, 215 printk(KERN_ERR "i2c-amd756-s4882: "
217 "Virtual adapter %d registration " 216 "Virtual adapter %d registration "
218 "failed, module not inserted\n", i); 217 "failed, module not inserted\n", i);
219 for (i--; i >= 0; i--) 218 for (i--; i >= 0; i--)
@@ -252,8 +251,8 @@ static void __exit amd756_s4882_exit(void)
252 251
253 /* Restore physical bus */ 252 /* Restore physical bus */
254 if (i2c_add_adapter(&amd756_smbus)) 253 if (i2c_add_adapter(&amd756_smbus))
255 dev_err(&amd756_smbus.dev, "Physical bus restoration " 254 printk(KERN_ERR "i2c-amd756-s4882: "
256 "failed\n"); 255 "Physical bus restoration failed\n");
257} 256}
258 257
259MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>"); 258MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 10b9342a36c2..27443f073bc9 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -17,7 +17,8 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/platform_device.h> 20#include <linux/of_platform.h>
21#include <linux/of_i2c.h>
21 22
22#include <asm/io.h> 23#include <asm/io.h>
23#include <linux/fsl_devices.h> 24#include <linux/fsl_devices.h>
@@ -25,13 +26,13 @@
25#include <linux/interrupt.h> 26#include <linux/interrupt.h>
26#include <linux/delay.h> 27#include <linux/delay.h>
27 28
28#define MPC_I2C_ADDR 0x00 29#define DRV_NAME "mpc-i2c"
30
29#define MPC_I2C_FDR 0x04 31#define MPC_I2C_FDR 0x04
30#define MPC_I2C_CR 0x08 32#define MPC_I2C_CR 0x08
31#define MPC_I2C_SR 0x0c 33#define MPC_I2C_SR 0x0c
32#define MPC_I2C_DR 0x10 34#define MPC_I2C_DR 0x10
33#define MPC_I2C_DFSRR 0x14 35#define MPC_I2C_DFSRR 0x14
34#define MPC_I2C_REGION 0x20
35 36
36#define CCR_MEN 0x80 37#define CCR_MEN 0x80
37#define CCR_MIEN 0x40 38#define CCR_MIEN 0x40
@@ -315,102 +316,117 @@ static struct i2c_adapter mpc_ops = {
315 .timeout = 1, 316 .timeout = 1,
316}; 317};
317 318
318static int fsl_i2c_probe(struct platform_device *pdev) 319static int __devinit fsl_i2c_probe(struct of_device *op, const struct of_device_id *match)
319{ 320{
320 int result = 0; 321 int result = 0;
321 struct mpc_i2c *i2c; 322 struct mpc_i2c *i2c;
322 struct fsl_i2c_platform_data *pdata;
323 struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
324
325 pdata = (struct fsl_i2c_platform_data *) pdev->dev.platform_data;
326 323
327 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); 324 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
328 if (!i2c) 325 if (!i2c)
329 return -ENOMEM; 326 return -ENOMEM;
330 327
331 i2c->irq = platform_get_irq(pdev, 0); 328 if (of_get_property(op->node, "dfsrr", NULL))
332 if (i2c->irq < 0) 329 i2c->flags |= FSL_I2C_DEV_SEPARATE_DFSRR;
333 i2c->irq = NO_IRQ; /* Use polling */
334 330
335 i2c->flags = pdata->device_flags; 331 if (of_device_is_compatible(op->node, "fsl,mpc5200-i2c") ||
336 init_waitqueue_head(&i2c->queue); 332 of_device_is_compatible(op->node, "mpc5200-i2c"))
333 i2c->flags |= FSL_I2C_DEV_CLOCK_5200;
337 334
338 i2c->base = ioremap((phys_addr_t)r->start, MPC_I2C_REGION); 335 init_waitqueue_head(&i2c->queue);
339 336
337 i2c->base = of_iomap(op->node, 0);
340 if (!i2c->base) { 338 if (!i2c->base) {
341 printk(KERN_ERR "i2c-mpc - failed to map controller\n"); 339 printk(KERN_ERR "i2c-mpc - failed to map controller\n");
342 result = -ENOMEM; 340 result = -ENOMEM;
343 goto fail_map; 341 goto fail_map;
344 } 342 }
345 343
346 if (i2c->irq != NO_IRQ) 344 i2c->irq = irq_of_parse_and_map(op->node, 0);
347 if ((result = request_irq(i2c->irq, mpc_i2c_isr, 345 if (i2c->irq != NO_IRQ) { /* i2c->irq = NO_IRQ implies polling */
348 IRQF_SHARED, "i2c-mpc", i2c)) < 0) { 346 result = request_irq(i2c->irq, mpc_i2c_isr,
349 printk(KERN_ERR 347 IRQF_SHARED, "i2c-mpc", i2c);
350 "i2c-mpc - failed to attach interrupt\n"); 348 if (result < 0) {
351 goto fail_irq; 349 printk(KERN_ERR "i2c-mpc - failed to attach interrupt\n");
350 goto fail_request;
352 } 351 }
353 352 }
353
354 mpc_i2c_setclock(i2c); 354 mpc_i2c_setclock(i2c);
355 platform_set_drvdata(pdev, i2c); 355
356 dev_set_drvdata(&op->dev, i2c);
356 357
357 i2c->adap = mpc_ops; 358 i2c->adap = mpc_ops;
358 i2c->adap.nr = pdev->id;
359 i2c_set_adapdata(&i2c->adap, i2c); 359 i2c_set_adapdata(&i2c->adap, i2c);
360 i2c->adap.dev.parent = &pdev->dev; 360 i2c->adap.dev.parent = &op->dev;
361 if ((result = i2c_add_numbered_adapter(&i2c->adap)) < 0) { 361
362 result = i2c_add_adapter(&i2c->adap);
363 if (result < 0) {
362 printk(KERN_ERR "i2c-mpc - failed to add adapter\n"); 364 printk(KERN_ERR "i2c-mpc - failed to add adapter\n");
363 goto fail_add; 365 goto fail_add;
364 } 366 }
367 of_register_i2c_devices(&i2c->adap, op->node);
365 368
366 return result; 369 return result;
367 370
368 fail_add: 371 fail_add:
369 if (i2c->irq != NO_IRQ) 372 dev_set_drvdata(&op->dev, NULL);
370 free_irq(i2c->irq, i2c); 373 free_irq(i2c->irq, i2c);
371 fail_irq: 374 fail_request:
372 iounmap(i2c->base); 375 irq_dispose_mapping(i2c->irq);
373 fail_map: 376 iounmap(i2c->base);
377 fail_map:
374 kfree(i2c); 378 kfree(i2c);
375 return result; 379 return result;
376}; 380};
377 381
378static int fsl_i2c_remove(struct platform_device *pdev) 382static int __devexit fsl_i2c_remove(struct of_device *op)
379{ 383{
380 struct mpc_i2c *i2c = platform_get_drvdata(pdev); 384 struct mpc_i2c *i2c = dev_get_drvdata(&op->dev);
381 385
382 i2c_del_adapter(&i2c->adap); 386 i2c_del_adapter(&i2c->adap);
383 platform_set_drvdata(pdev, NULL); 387 dev_set_drvdata(&op->dev, NULL);
384 388
385 if (i2c->irq != NO_IRQ) 389 if (i2c->irq != NO_IRQ)
386 free_irq(i2c->irq, i2c); 390 free_irq(i2c->irq, i2c);
387 391
392 irq_dispose_mapping(i2c->irq);
388 iounmap(i2c->base); 393 iounmap(i2c->base);
389 kfree(i2c); 394 kfree(i2c);
390 return 0; 395 return 0;
391}; 396};
392 397
393/* work with hotplug and coldplug */ 398static const struct of_device_id mpc_i2c_of_match[] = {
394MODULE_ALIAS("platform:fsl-i2c"); 399 {.compatible = "fsl-i2c",},
400 {},
401};
402MODULE_DEVICE_TABLE(of, mpc_i2c_of_match);
403
395 404
396/* Structure for a device driver */ 405/* Structure for a device driver */
397static struct platform_driver fsl_i2c_driver = { 406static struct of_platform_driver mpc_i2c_driver = {
398 .probe = fsl_i2c_probe, 407 .match_table = mpc_i2c_of_match,
399 .remove = fsl_i2c_remove, 408 .probe = fsl_i2c_probe,
400 .driver = { 409 .remove = __devexit_p(fsl_i2c_remove),
401 .owner = THIS_MODULE, 410 .driver = {
402 .name = "fsl-i2c", 411 .owner = THIS_MODULE,
412 .name = DRV_NAME,
403 }, 413 },
404}; 414};
405 415
406static int __init fsl_i2c_init(void) 416static int __init fsl_i2c_init(void)
407{ 417{
408 return platform_driver_register(&fsl_i2c_driver); 418 int rv;
419
420 rv = of_register_platform_driver(&mpc_i2c_driver);
421 if (rv)
422 printk(KERN_ERR DRV_NAME
423 " of_register_platform_driver failed (%i)\n", rv);
424 return rv;
409} 425}
410 426
411static void __exit fsl_i2c_exit(void) 427static void __exit fsl_i2c_exit(void)
412{ 428{
413 platform_driver_unregister(&fsl_i2c_driver); 429 of_unregister_platform_driver(&mpc_i2c_driver);
414} 430}
415 431
416module_init(fsl_i2c_init); 432module_init(fsl_i2c_init);
diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
index 6a8995dfd0bb..d1a4cbcf2aa4 100644
--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
@@ -150,6 +150,16 @@ static int __init nforce2_s4985_init(void)
150 int i, error; 150 int i, error;
151 union i2c_smbus_data ioconfig; 151 union i2c_smbus_data ioconfig;
152 152
153 /* Configure the PCA9556 multiplexer */
154 ioconfig.byte = 0x00; /* All I/O to output mode */
155 error = i2c_smbus_xfer(nforce2_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03,
156 I2C_SMBUS_BYTE_DATA, &ioconfig);
157 if (error) {
158 dev_err(&nforce2_smbus->dev, "PCA9556 configuration failed\n");
159 error = -EIO;
160 goto ERROR0;
161 }
162
153 /* Unregister physical bus */ 163 /* Unregister physical bus */
154 if (!nforce2_smbus) 164 if (!nforce2_smbus)
155 return -ENODEV; 165 return -ENODEV;
@@ -191,24 +201,13 @@ static int __init nforce2_s4985_init(void)
191 s4985_algo[3].smbus_xfer = nforce2_access_virt3; 201 s4985_algo[3].smbus_xfer = nforce2_access_virt3;
192 s4985_algo[4].smbus_xfer = nforce2_access_virt4; 202 s4985_algo[4].smbus_xfer = nforce2_access_virt4;
193 203
194 /* Configure the PCA9556 multiplexer */
195 ioconfig.byte = 0x00; /* All I/O to output mode */
196 error = nforce2_smbus->algo->smbus_xfer(nforce2_smbus, 0x18, 0,
197 I2C_SMBUS_WRITE, 0x03,
198 I2C_SMBUS_BYTE_DATA, &ioconfig);
199 if (error) {
200 dev_err(&nforce2_smbus->dev, "PCA9556 configuration failed\n");
201 error = -EIO;
202 goto ERROR3;
203 }
204
205 /* Register virtual adapters */ 204 /* Register virtual adapters */
206 for (i = 0; i < 5; i++) { 205 for (i = 0; i < 5; i++) {
207 error = i2c_add_adapter(s4985_adapter + i); 206 error = i2c_add_adapter(s4985_adapter + i);
208 if (error) { 207 if (error) {
209 dev_err(&nforce2_smbus->dev, 208 printk(KERN_ERR "i2c-nforce2-s4985: "
210 "Virtual adapter %d registration " 209 "Virtual adapter %d registration "
211 "failed, module not inserted\n", i); 210 "failed, module not inserted\n", i);
212 for (i--; i >= 0; i--) 211 for (i--; i >= 0; i--)
213 i2c_del_adapter(s4985_adapter + i); 212 i2c_del_adapter(s4985_adapter + i);
214 goto ERROR3; 213 goto ERROR3;
@@ -245,8 +244,8 @@ static void __exit nforce2_s4985_exit(void)
245 244
246 /* Restore physical bus */ 245 /* Restore physical bus */
247 if (i2c_add_adapter(nforce2_smbus)) 246 if (i2c_add_adapter(nforce2_smbus))
248 dev_err(&nforce2_smbus->dev, "Physical bus restoration " 247 printk(KERN_ERR "i2c-nforce2-s4985: "
249 "failed\n"); 248 "Physical bus restoration failed\n");
250} 249}
251 250
252MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>"); 251MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
diff --git a/drivers/i2c/chips/eeprom.c b/drivers/i2c/chips/eeprom.c
index 373ea8d8fe8f..2c27193aeaa0 100644
--- a/drivers/i2c/chips/eeprom.c
+++ b/drivers/i2c/chips/eeprom.c
@@ -47,7 +47,6 @@ enum eeprom_nature {
47 47
48/* Each client has this additional data */ 48/* Each client has this additional data */
49struct eeprom_data { 49struct eeprom_data {
50 struct i2c_client client;
51 struct mutex update_lock; 50 struct mutex update_lock;
52 u8 valid; /* bitfield, bit!=0 if slice is valid */ 51 u8 valid; /* bitfield, bit!=0 if slice is valid */
53 unsigned long last_updated[8]; /* In jiffies, 8 slices */ 52 unsigned long last_updated[8]; /* In jiffies, 8 slices */
@@ -56,19 +55,6 @@ struct eeprom_data {
56}; 55};
57 56
58 57
59static int eeprom_attach_adapter(struct i2c_adapter *adapter);
60static int eeprom_detect(struct i2c_adapter *adapter, int address, int kind);
61static int eeprom_detach_client(struct i2c_client *client);
62
63/* This is the driver that will be inserted */
64static struct i2c_driver eeprom_driver = {
65 .driver = {
66 .name = "eeprom",
67 },
68 .attach_adapter = eeprom_attach_adapter,
69 .detach_client = eeprom_detach_client,
70};
71
72static void eeprom_update_client(struct i2c_client *client, u8 slice) 58static void eeprom_update_client(struct i2c_client *client, u8 slice)
73{ 59{
74 struct eeprom_data *data = i2c_get_clientdata(client); 60 struct eeprom_data *data = i2c_get_clientdata(client);
@@ -148,25 +134,17 @@ static struct bin_attribute eeprom_attr = {
148 .read = eeprom_read, 134 .read = eeprom_read,
149}; 135};
150 136
151static int eeprom_attach_adapter(struct i2c_adapter *adapter) 137/* Return 0 if detection is successful, -ENODEV otherwise */
152{ 138static int eeprom_detect(struct i2c_client *client, int kind,
153 if (!(adapter->class & (I2C_CLASS_DDC | I2C_CLASS_SPD))) 139 struct i2c_board_info *info)
154 return 0;
155 return i2c_probe(adapter, &addr_data, eeprom_detect);
156}
157
158/* This function is called by i2c_probe */
159static int eeprom_detect(struct i2c_adapter *adapter, int address, int kind)
160{ 140{
161 struct i2c_client *client; 141 struct i2c_adapter *adapter = client->adapter;
162 struct eeprom_data *data;
163 int err = 0;
164 142
165 /* EDID EEPROMs are often 24C00 EEPROMs, which answer to all 143 /* EDID EEPROMs are often 24C00 EEPROMs, which answer to all
166 addresses 0x50-0x57, but we only care about 0x50. So decline 144 addresses 0x50-0x57, but we only care about 0x50. So decline
167 attaching to addresses >= 0x51 on DDC buses */ 145 attaching to addresses >= 0x51 on DDC buses */
168 if (!(adapter->class & I2C_CLASS_SPD) && address >= 0x51) 146 if (!(adapter->class & I2C_CLASS_SPD) && client->addr >= 0x51)
169 goto exit; 147 return -ENODEV;
170 148
171 /* There are four ways we can read the EEPROM data: 149 /* There are four ways we can read the EEPROM data:
172 (1) I2C block reads (faster, but unsupported by most adapters) 150 (1) I2C block reads (faster, but unsupported by most adapters)
@@ -177,32 +155,33 @@ static int eeprom_detect(struct i2c_adapter *adapter, int address, int kind)
177 because all known adapters support one of the first two. */ 155 because all known adapters support one of the first two. */
178 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_WORD_DATA) 156 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)
179 && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) 157 && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK))
180 goto exit; 158 return -ENODEV;
159
160 strlcpy(info->type, "eeprom", I2C_NAME_SIZE);
161
162 return 0;
163}
164
165static int eeprom_probe(struct i2c_client *client,
166 const struct i2c_device_id *id)
167{
168 struct i2c_adapter *adapter = client->adapter;
169 struct eeprom_data *data;
170 int err;
181 171
182 if (!(data = kzalloc(sizeof(struct eeprom_data), GFP_KERNEL))) { 172 if (!(data = kzalloc(sizeof(struct eeprom_data), GFP_KERNEL))) {
183 err = -ENOMEM; 173 err = -ENOMEM;
184 goto exit; 174 goto exit;
185 } 175 }
186 176
187 client = &data->client;
188 memset(data->data, 0xff, EEPROM_SIZE); 177 memset(data->data, 0xff, EEPROM_SIZE);
189 i2c_set_clientdata(client, data); 178 i2c_set_clientdata(client, data);
190 client->addr = address;
191 client->adapter = adapter;
192 client->driver = &eeprom_driver;
193
194 /* Fill in the remaining client fields */
195 strlcpy(client->name, "eeprom", I2C_NAME_SIZE);
196 mutex_init(&data->update_lock); 179 mutex_init(&data->update_lock);
197 data->nature = UNKNOWN; 180 data->nature = UNKNOWN;
198 181
199 /* Tell the I2C layer a new client has arrived */
200 if ((err = i2c_attach_client(client)))
201 goto exit_kfree;
202
203 /* Detect the Vaio nature of EEPROMs. 182 /* Detect the Vaio nature of EEPROMs.
204 We use the "PCG-" or "VGN-" prefix as the signature. */ 183 We use the "PCG-" or "VGN-" prefix as the signature. */
205 if (address == 0x57 184 if (client->addr == 0x57
206 && i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA)) { 185 && i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
207 char name[4]; 186 char name[4];
208 187
@@ -221,33 +200,42 @@ static int eeprom_detect(struct i2c_adapter *adapter, int address, int kind)
221 /* create the sysfs eeprom file */ 200 /* create the sysfs eeprom file */
222 err = sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr); 201 err = sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
223 if (err) 202 if (err)
224 goto exit_detach; 203 goto exit_kfree;
225 204
226 return 0; 205 return 0;
227 206
228exit_detach:
229 i2c_detach_client(client);
230exit_kfree: 207exit_kfree:
231 kfree(data); 208 kfree(data);
232exit: 209exit:
233 return err; 210 return err;
234} 211}
235 212
236static int eeprom_detach_client(struct i2c_client *client) 213static int eeprom_remove(struct i2c_client *client)
237{ 214{
238 int err;
239
240 sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr); 215 sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr);
241
242 err = i2c_detach_client(client);
243 if (err)
244 return err;
245
246 kfree(i2c_get_clientdata(client)); 216 kfree(i2c_get_clientdata(client));
247 217
248 return 0; 218 return 0;
249} 219}
250 220
221static const struct i2c_device_id eeprom_id[] = {
222 { "eeprom", 0 },
223 { }
224};
225
226static struct i2c_driver eeprom_driver = {
227 .driver = {
228 .name = "eeprom",
229 },
230 .probe = eeprom_probe,
231 .remove = eeprom_remove,
232 .id_table = eeprom_id,
233
234 .class = I2C_CLASS_DDC | I2C_CLASS_SPD,
235 .detect = eeprom_detect,
236 .address_data = &addr_data,
237};
238
251static int __init eeprom_init(void) 239static int __init eeprom_init(void)
252{ 240{
253 return i2c_add_driver(&eeprom_driver); 241 return i2c_add_driver(&eeprom_driver);
diff --git a/drivers/i2c/chips/max6875.c b/drivers/i2c/chips/max6875.c
index 5a0285d8b6f9..033d9d81ec8a 100644
--- a/drivers/i2c/chips/max6875.c
+++ b/drivers/i2c/chips/max6875.c
@@ -53,7 +53,7 @@ I2C_CLIENT_INSMOD_1(max6875);
53 53
54/* Each client has this additional data */ 54/* Each client has this additional data */
55struct max6875_data { 55struct max6875_data {
56 struct i2c_client client; 56 struct i2c_client *fake_client;
57 struct mutex update_lock; 57 struct mutex update_lock;
58 58
59 u32 valid; 59 u32 valid;
@@ -61,19 +61,6 @@ struct max6875_data {
61 unsigned long last_updated[USER_EEPROM_SLICES]; 61 unsigned long last_updated[USER_EEPROM_SLICES];
62}; 62};
63 63
64static int max6875_attach_adapter(struct i2c_adapter *adapter);
65static int max6875_detect(struct i2c_adapter *adapter, int address, int kind);
66static int max6875_detach_client(struct i2c_client *client);
67
68/* This is the driver that will be inserted */
69static struct i2c_driver max6875_driver = {
70 .driver = {
71 .name = "max6875",
72 },
73 .attach_adapter = max6875_attach_adapter,
74 .detach_client = max6875_detach_client,
75};
76
77static void max6875_update_slice(struct i2c_client *client, int slice) 64static void max6875_update_slice(struct i2c_client *client, int slice)
78{ 65{
79 struct max6875_data *data = i2c_get_clientdata(client); 66 struct max6875_data *data = i2c_get_clientdata(client);
@@ -159,96 +146,87 @@ static struct bin_attribute user_eeprom_attr = {
159 .read = max6875_read, 146 .read = max6875_read,
160}; 147};
161 148
162static int max6875_attach_adapter(struct i2c_adapter *adapter) 149/* Return 0 if detection is successful, -ENODEV otherwise */
150static int max6875_detect(struct i2c_client *client, int kind,
151 struct i2c_board_info *info)
163{ 152{
164 return i2c_probe(adapter, &addr_data, max6875_detect); 153 struct i2c_adapter *adapter = client->adapter;
165}
166
167/* This function is called by i2c_probe */
168static int max6875_detect(struct i2c_adapter *adapter, int address, int kind)
169{
170 struct i2c_client *real_client;
171 struct i2c_client *fake_client;
172 struct max6875_data *data;
173 int err;
174 154
175 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA 155 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA
176 | I2C_FUNC_SMBUS_READ_BYTE)) 156 | I2C_FUNC_SMBUS_READ_BYTE))
177 return 0; 157 return -ENODEV;
178 158
179 /* Only check even addresses */ 159 /* Only check even addresses */
180 if (address & 1) 160 if (client->addr & 1)
181 return 0; 161 return -ENODEV;
162
163 strlcpy(info->type, "max6875", I2C_NAME_SIZE);
164
165 return 0;
166}
167
168static int max6875_probe(struct i2c_client *client,
169 const struct i2c_device_id *id)
170{
171 struct max6875_data *data;
172 int err;
182 173
183 if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL))) 174 if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL)))
184 return -ENOMEM; 175 return -ENOMEM;
185 176
186 /* A fake client is created on the odd address */ 177 /* A fake client is created on the odd address */
187 if (!(fake_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) { 178 data->fake_client = i2c_new_dummy(client->adapter, client->addr + 1);
179 if (!data->fake_client) {
188 err = -ENOMEM; 180 err = -ENOMEM;
189 goto exit_kfree1; 181 goto exit_kfree;
190 } 182 }
191 183
192 /* Init real i2c_client */ 184 /* Init real i2c_client */
193 real_client = &data->client; 185 i2c_set_clientdata(client, data);
194 i2c_set_clientdata(real_client, data);
195 real_client->addr = address;
196 real_client->adapter = adapter;
197 real_client->driver = &max6875_driver;
198 strlcpy(real_client->name, "max6875", I2C_NAME_SIZE);
199 mutex_init(&data->update_lock); 186 mutex_init(&data->update_lock);
200 187
201 /* Init fake client data */ 188 err = sysfs_create_bin_file(&client->dev.kobj, &user_eeprom_attr);
202 i2c_set_clientdata(fake_client, NULL);
203 fake_client->addr = address | 1;
204 fake_client->adapter = adapter;
205 fake_client->driver = &max6875_driver;
206 strlcpy(fake_client->name, "max6875 subclient", I2C_NAME_SIZE);
207
208 if ((err = i2c_attach_client(real_client)) != 0)
209 goto exit_kfree2;
210
211 if ((err = i2c_attach_client(fake_client)) != 0)
212 goto exit_detach1;
213
214 err = sysfs_create_bin_file(&real_client->dev.kobj, &user_eeprom_attr);
215 if (err) 189 if (err)
216 goto exit_detach2; 190 goto exit_remove_fake;
217 191
218 return 0; 192 return 0;
219 193
220exit_detach2: 194exit_remove_fake:
221 i2c_detach_client(fake_client); 195 i2c_unregister_device(data->fake_client);
222exit_detach1: 196exit_kfree:
223 i2c_detach_client(real_client);
224exit_kfree2:
225 kfree(fake_client);
226exit_kfree1:
227 kfree(data); 197 kfree(data);
228 return err; 198 return err;
229} 199}
230 200
231/* Will be called for both the real client and the fake client */ 201static int max6875_remove(struct i2c_client *client)
232static int max6875_detach_client(struct i2c_client *client)
233{ 202{
234 int err;
235 struct max6875_data *data = i2c_get_clientdata(client); 203 struct max6875_data *data = i2c_get_clientdata(client);
236 204
237 /* data is NULL for the fake client */ 205 i2c_unregister_device(data->fake_client);
238 if (data)
239 sysfs_remove_bin_file(&client->dev.kobj, &user_eeprom_attr);
240 206
241 err = i2c_detach_client(client); 207 sysfs_remove_bin_file(&client->dev.kobj, &user_eeprom_attr);
242 if (err) 208 kfree(data);
243 return err;
244 209
245 if (data) /* real client */
246 kfree(data);
247 else /* fake client */
248 kfree(client);
249 return 0; 210 return 0;
250} 211}
251 212
213static const struct i2c_device_id max6875_id[] = {
214 { "max6875", 0 },
215 { }
216};
217
218static struct i2c_driver max6875_driver = {
219 .driver = {
220 .name = "max6875",
221 },
222 .probe = max6875_probe,
223 .remove = max6875_remove,
224 .id_table = max6875_id,
225
226 .detect = max6875_detect,
227 .address_data = &addr_data,
228};
229
252static int __init max6875_init(void) 230static int __init max6875_init(void)
253{ 231{
254 return i2c_add_driver(&max6875_driver); 232 return i2c_add_driver(&max6875_driver);
diff --git a/drivers/i2c/chips/pca9539.c b/drivers/i2c/chips/pca9539.c
index 58ab7f26be26..270de4e56a81 100644
--- a/drivers/i2c/chips/pca9539.c
+++ b/drivers/i2c/chips/pca9539.c
@@ -14,8 +14,8 @@
14#include <linux/i2c.h> 14#include <linux/i2c.h>
15#include <linux/hwmon-sysfs.h> 15#include <linux/hwmon-sysfs.h>
16 16
17/* Addresses to scan */ 17/* Addresses to scan: none, device is not autodetected */
18static unsigned short normal_i2c[] = {0x74, 0x75, 0x76, 0x77, I2C_CLIENT_END}; 18static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
19 19
20/* Insmod parameters */ 20/* Insmod parameters */
21I2C_CLIENT_INSMOD_1(pca9539); 21I2C_CLIENT_INSMOD_1(pca9539);
@@ -32,23 +32,6 @@ enum pca9539_cmd
32 PCA9539_DIRECTION_1 = 7, 32 PCA9539_DIRECTION_1 = 7,
33}; 33};
34 34
35static int pca9539_attach_adapter(struct i2c_adapter *adapter);
36static int pca9539_detect(struct i2c_adapter *adapter, int address, int kind);
37static int pca9539_detach_client(struct i2c_client *client);
38
39/* This is the driver that will be inserted */
40static struct i2c_driver pca9539_driver = {
41 .driver = {
42 .name = "pca9539",
43 },
44 .attach_adapter = pca9539_attach_adapter,
45 .detach_client = pca9539_detach_client,
46};
47
48struct pca9539_data {
49 struct i2c_client client;
50};
51
52/* following are the sysfs callback functions */ 35/* following are the sysfs callback functions */
53static ssize_t pca9539_show(struct device *dev, struct device_attribute *attr, 36static ssize_t pca9539_show(struct device *dev, struct device_attribute *attr,
54 char *buf) 37 char *buf)
@@ -105,77 +88,51 @@ static struct attribute_group pca9539_defattr_group = {
105 .attrs = pca9539_attributes, 88 .attrs = pca9539_attributes,
106}; 89};
107 90
108static int pca9539_attach_adapter(struct i2c_adapter *adapter) 91/* Return 0 if detection is successful, -ENODEV otherwise */
92static int pca9539_detect(struct i2c_client *client, int kind,
93 struct i2c_board_info *info)
109{ 94{
110 return i2c_probe(adapter, &addr_data, pca9539_detect); 95 struct i2c_adapter *adapter = client->adapter;
111}
112
113/* This function is called by i2c_probe */
114static int pca9539_detect(struct i2c_adapter *adapter, int address, int kind)
115{
116 struct i2c_client *client;
117 struct pca9539_data *data;
118 int err = 0;
119 96
120 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 97 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
121 goto exit; 98 return -ENODEV;
122
123 /* OK. For now, we presume we have a valid client. We now create the
124 client structure, even though we cannot fill it completely yet. */
125 if (!(data = kzalloc(sizeof(struct pca9539_data), GFP_KERNEL))) {
126 err = -ENOMEM;
127 goto exit;
128 }
129
130 client = &data->client;
131 i2c_set_clientdata(client, data);
132 client->addr = address;
133 client->adapter = adapter;
134 client->driver = &pca9539_driver;
135
136 if (kind < 0) {
137 /* Detection: the pca9539 only has 8 registers (0-7).
138 A read of 7 should succeed, but a read of 8 should fail. */
139 if ((i2c_smbus_read_byte_data(client, 7) < 0) ||
140 (i2c_smbus_read_byte_data(client, 8) >= 0))
141 goto exit_kfree;
142 }
143
144 strlcpy(client->name, "pca9539", I2C_NAME_SIZE);
145
146 /* Tell the I2C layer a new client has arrived */
147 if ((err = i2c_attach_client(client)))
148 goto exit_kfree;
149 99
150 /* Register sysfs hooks */ 100 strlcpy(info->type, "pca9539", I2C_NAME_SIZE);
151 err = sysfs_create_group(&client->dev.kobj,
152 &pca9539_defattr_group);
153 if (err)
154 goto exit_detach;
155 101
156 return 0; 102 return 0;
157
158exit_detach:
159 i2c_detach_client(client);
160exit_kfree:
161 kfree(data);
162exit:
163 return err;
164} 103}
165 104
166static int pca9539_detach_client(struct i2c_client *client) 105static int pca9539_probe(struct i2c_client *client,
106 const struct i2c_device_id *id)
167{ 107{
168 int err; 108 /* Register sysfs hooks */
109 return sysfs_create_group(&client->dev.kobj,
110 &pca9539_defattr_group);
111}
169 112
113static int pca9539_remove(struct i2c_client *client)
114{
170 sysfs_remove_group(&client->dev.kobj, &pca9539_defattr_group); 115 sysfs_remove_group(&client->dev.kobj, &pca9539_defattr_group);
171
172 if ((err = i2c_detach_client(client)))
173 return err;
174
175 kfree(i2c_get_clientdata(client));
176 return 0; 116 return 0;
177} 117}
178 118
119static const struct i2c_device_id pca9539_id[] = {
120 { "pca9539", 0 },
121 { }
122};
123
124static struct i2c_driver pca9539_driver = {
125 .driver = {
126 .name = "pca9539",
127 },
128 .probe = pca9539_probe,
129 .remove = pca9539_remove,
130 .id_table = pca9539_id,
131
132 .detect = pca9539_detect,
133 .address_data = &addr_data,
134};
135
179static int __init pca9539_init(void) 136static int __init pca9539_init(void)
180{ 137{
181 return i2c_add_driver(&pca9539_driver); 138 return i2c_add_driver(&pca9539_driver);
diff --git a/drivers/i2c/chips/pcf8574.c b/drivers/i2c/chips/pcf8574.c
index 1b3db2b3ada9..6ec309894c88 100644
--- a/drivers/i2c/chips/pcf8574.c
+++ b/drivers/i2c/chips/pcf8574.c
@@ -38,37 +38,19 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/i2c.h> 39#include <linux/i2c.h>
40 40
41/* Addresses to scan */ 41/* Addresses to scan: none, device can't be detected */
42static const unsigned short normal_i2c[] = { 42static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
43 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
44 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
45 I2C_CLIENT_END
46};
47 43
48/* Insmod parameters */ 44/* Insmod parameters */
49I2C_CLIENT_INSMOD_2(pcf8574, pcf8574a); 45I2C_CLIENT_INSMOD_2(pcf8574, pcf8574a);
50 46
51/* Each client has this additional data */ 47/* Each client has this additional data */
52struct pcf8574_data { 48struct pcf8574_data {
53 struct i2c_client client;
54
55 int write; /* Remember last written value */ 49 int write; /* Remember last written value */
56}; 50};
57 51
58static int pcf8574_attach_adapter(struct i2c_adapter *adapter);
59static int pcf8574_detect(struct i2c_adapter *adapter, int address, int kind);
60static int pcf8574_detach_client(struct i2c_client *client);
61static void pcf8574_init_client(struct i2c_client *client); 52static void pcf8574_init_client(struct i2c_client *client);
62 53
63/* This is the driver that will be inserted */
64static struct i2c_driver pcf8574_driver = {
65 .driver = {
66 .name = "pcf8574",
67 },
68 .attach_adapter = pcf8574_attach_adapter,
69 .detach_client = pcf8574_detach_client,
70};
71
72/* following are the sysfs callback functions */ 54/* following are the sysfs callback functions */
73static ssize_t show_read(struct device *dev, struct device_attribute *attr, char *buf) 55static ssize_t show_read(struct device *dev, struct device_attribute *attr, char *buf)
74{ 56{
@@ -119,41 +101,22 @@ static const struct attribute_group pcf8574_attr_group = {
119 * Real code 101 * Real code
120 */ 102 */
121 103
122static int pcf8574_attach_adapter(struct i2c_adapter *adapter) 104/* Return 0 if detection is successful, -ENODEV otherwise */
123{ 105static int pcf8574_detect(struct i2c_client *client, int kind,
124 return i2c_probe(adapter, &addr_data, pcf8574_detect); 106 struct i2c_board_info *info)
125}
126
127/* This function is called by i2c_probe */
128static int pcf8574_detect(struct i2c_adapter *adapter, int address, int kind)
129{ 107{
130 struct i2c_client *client; 108 struct i2c_adapter *adapter = client->adapter;
131 struct pcf8574_data *data; 109 const char *client_name;
132 int err = 0;
133 const char *client_name = "";
134 110
135 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) 111 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
136 goto exit; 112 return -ENODEV;
137
138 /* OK. For now, we presume we have a valid client. We now create the
139 client structure, even though we cannot fill it completely yet. */
140 if (!(data = kzalloc(sizeof(struct pcf8574_data), GFP_KERNEL))) {
141 err = -ENOMEM;
142 goto exit;
143 }
144
145 client = &data->client;
146 i2c_set_clientdata(client, data);
147 client->addr = address;
148 client->adapter = adapter;
149 client->driver = &pcf8574_driver;
150 113
151 /* Now, we would do the remaining detection. But the PCF8574 is plainly 114 /* Now, we would do the remaining detection. But the PCF8574 is plainly
152 impossible to detect! Stupid chip. */ 115 impossible to detect! Stupid chip. */
153 116
154 /* Determine the chip type */ 117 /* Determine the chip type */
155 if (kind <= 0) { 118 if (kind <= 0) {
156 if (address >= 0x38 && address <= 0x3f) 119 if (client->addr >= 0x38 && client->addr <= 0x3f)
157 kind = pcf8574a; 120 kind = pcf8574a;
158 else 121 else
159 kind = pcf8574; 122 kind = pcf8574;
@@ -163,40 +126,43 @@ static int pcf8574_detect(struct i2c_adapter *adapter, int address, int kind)
163 client_name = "pcf8574a"; 126 client_name = "pcf8574a";
164 else 127 else
165 client_name = "pcf8574"; 128 client_name = "pcf8574";
129 strlcpy(info->type, client_name, I2C_NAME_SIZE);
166 130
167 /* Fill in the remaining client fields and put it into the global list */ 131 return 0;
168 strlcpy(client->name, client_name, I2C_NAME_SIZE); 132}
133
134static int pcf8574_probe(struct i2c_client *client,
135 const struct i2c_device_id *id)
136{
137 struct pcf8574_data *data;
138 int err;
139
140 data = kzalloc(sizeof(struct pcf8574_data), GFP_KERNEL);
141 if (!data) {
142 err = -ENOMEM;
143 goto exit;
144 }
145
146 i2c_set_clientdata(client, data);
169 147
170 /* Tell the I2C layer a new client has arrived */
171 if ((err = i2c_attach_client(client)))
172 goto exit_free;
173
174 /* Initialize the PCF8574 chip */ 148 /* Initialize the PCF8574 chip */
175 pcf8574_init_client(client); 149 pcf8574_init_client(client);
176 150
177 /* Register sysfs hooks */ 151 /* Register sysfs hooks */
178 err = sysfs_create_group(&client->dev.kobj, &pcf8574_attr_group); 152 err = sysfs_create_group(&client->dev.kobj, &pcf8574_attr_group);
179 if (err) 153 if (err)
180 goto exit_detach; 154 goto exit_free;
181 return 0; 155 return 0;
182 156
183 exit_detach:
184 i2c_detach_client(client);
185 exit_free: 157 exit_free:
186 kfree(data); 158 kfree(data);
187 exit: 159 exit:
188 return err; 160 return err;
189} 161}
190 162
191static int pcf8574_detach_client(struct i2c_client *client) 163static int pcf8574_remove(struct i2c_client *client)
192{ 164{
193 int err;
194
195 sysfs_remove_group(&client->dev.kobj, &pcf8574_attr_group); 165 sysfs_remove_group(&client->dev.kobj, &pcf8574_attr_group);
196
197 if ((err = i2c_detach_client(client)))
198 return err;
199
200 kfree(i2c_get_clientdata(client)); 166 kfree(i2c_get_clientdata(client));
201 return 0; 167 return 0;
202} 168}
@@ -208,6 +174,24 @@ static void pcf8574_init_client(struct i2c_client *client)
208 data->write = -EAGAIN; 174 data->write = -EAGAIN;
209} 175}
210 176
177static const struct i2c_device_id pcf8574_id[] = {
178 { "pcf8574", 0 },
179 { "pcf8574a", 0 },
180 { }
181};
182
183static struct i2c_driver pcf8574_driver = {
184 .driver = {
185 .name = "pcf8574",
186 },
187 .probe = pcf8574_probe,
188 .remove = pcf8574_remove,
189 .id_table = pcf8574_id,
190
191 .detect = pcf8574_detect,
192 .address_data = &addr_data,
193};
194
211static int __init pcf8574_init(void) 195static int __init pcf8574_init(void)
212{ 196{
213 return i2c_add_driver(&pcf8574_driver); 197 return i2c_add_driver(&pcf8574_driver);
diff --git a/drivers/i2c/chips/pcf8575.c b/drivers/i2c/chips/pcf8575.c
index 3ea08ac0bfa3..07fd7cb3c57d 100644
--- a/drivers/i2c/chips/pcf8575.c
+++ b/drivers/i2c/chips/pcf8575.c
@@ -32,11 +32,8 @@
32#include <linux/slab.h> /* kzalloc() */ 32#include <linux/slab.h> /* kzalloc() */
33#include <linux/sysfs.h> /* sysfs_create_group() */ 33#include <linux/sysfs.h> /* sysfs_create_group() */
34 34
35/* Addresses to scan */ 35/* Addresses to scan: none, device can't be detected */
36static const unsigned short normal_i2c[] = { 36static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
37 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
38 I2C_CLIENT_END
39};
40 37
41/* Insmod parameters */ 38/* Insmod parameters */
42I2C_CLIENT_INSMOD; 39I2C_CLIENT_INSMOD;
@@ -44,24 +41,9 @@ I2C_CLIENT_INSMOD;
44 41
45/* Each client has this additional data */ 42/* Each client has this additional data */
46struct pcf8575_data { 43struct pcf8575_data {
47 struct i2c_client client;
48 int write; /* last written value, or error code */ 44 int write; /* last written value, or error code */
49}; 45};
50 46
51static int pcf8575_attach_adapter(struct i2c_adapter *adapter);
52static int pcf8575_detect(struct i2c_adapter *adapter, int address, int kind);
53static int pcf8575_detach_client(struct i2c_client *client);
54
55/* This is the driver that will be inserted */
56static struct i2c_driver pcf8575_driver = {
57 .driver = {
58 .owner = THIS_MODULE,
59 .name = "pcf8575",
60 },
61 .attach_adapter = pcf8575_attach_adapter,
62 .detach_client = pcf8575_detach_client,
63};
64
65/* following are the sysfs callback functions */ 47/* following are the sysfs callback functions */
66static ssize_t show_read(struct device *dev, struct device_attribute *attr, 48static ssize_t show_read(struct device *dev, struct device_attribute *attr,
67 char *buf) 49 char *buf)
@@ -126,75 +108,77 @@ static const struct attribute_group pcf8575_attr_group = {
126 * Real code 108 * Real code
127 */ 109 */
128 110
129static int pcf8575_attach_adapter(struct i2c_adapter *adapter) 111/* Return 0 if detection is successful, -ENODEV otherwise */
112static int pcf8575_detect(struct i2c_client *client, int kind,
113 struct i2c_board_info *info)
130{ 114{
131 return i2c_probe(adapter, &addr_data, pcf8575_detect); 115 struct i2c_adapter *adapter = client->adapter;
116
117 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
118 return -ENODEV;
119
120 /* This is the place to detect whether the chip at the specified
121 address really is a PCF8575 chip. However, there is no method known
122 to detect whether an I2C chip is a PCF8575 or any other I2C chip. */
123
124 strlcpy(info->type, "pcf8575", I2C_NAME_SIZE);
125
126 return 0;
132} 127}
133 128
134/* This function is called by i2c_probe */ 129static int pcf8575_probe(struct i2c_client *client,
135static int pcf8575_detect(struct i2c_adapter *adapter, int address, int kind) 130 const struct i2c_device_id *id)
136{ 131{
137 struct i2c_client *client;
138 struct pcf8575_data *data; 132 struct pcf8575_data *data;
139 int err = 0; 133 int err;
140
141 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
142 goto exit;
143 134
144 /* OK. For now, we presume we have a valid client. We now create the
145 client structure, even though we cannot fill it completely yet. */
146 data = kzalloc(sizeof(struct pcf8575_data), GFP_KERNEL); 135 data = kzalloc(sizeof(struct pcf8575_data), GFP_KERNEL);
147 if (!data) { 136 if (!data) {
148 err = -ENOMEM; 137 err = -ENOMEM;
149 goto exit; 138 goto exit;
150 } 139 }
151 140
152 client = &data->client;
153 i2c_set_clientdata(client, data); 141 i2c_set_clientdata(client, data);
154 client->addr = address;
155 client->adapter = adapter;
156 client->driver = &pcf8575_driver;
157 strlcpy(client->name, "pcf8575", I2C_NAME_SIZE);
158 data->write = -EAGAIN; 142 data->write = -EAGAIN;
159 143
160 /* This is the place to detect whether the chip at the specified
161 address really is a PCF8575 chip. However, there is no method known
162 to detect whether an I2C chip is a PCF8575 or any other I2C chip. */
163
164 /* Tell the I2C layer a new client has arrived */
165 err = i2c_attach_client(client);
166 if (err)
167 goto exit_free;
168
169 /* Register sysfs hooks */ 144 /* Register sysfs hooks */
170 err = sysfs_create_group(&client->dev.kobj, &pcf8575_attr_group); 145 err = sysfs_create_group(&client->dev.kobj, &pcf8575_attr_group);
171 if (err) 146 if (err)
172 goto exit_detach; 147 goto exit_free;
173 148
174 return 0; 149 return 0;
175 150
176exit_detach:
177 i2c_detach_client(client);
178exit_free: 151exit_free:
179 kfree(data); 152 kfree(data);
180exit: 153exit:
181 return err; 154 return err;
182} 155}
183 156
184static int pcf8575_detach_client(struct i2c_client *client) 157static int pcf8575_remove(struct i2c_client *client)
185{ 158{
186 int err;
187
188 sysfs_remove_group(&client->dev.kobj, &pcf8575_attr_group); 159 sysfs_remove_group(&client->dev.kobj, &pcf8575_attr_group);
189
190 err = i2c_detach_client(client);
191 if (err)
192 return err;
193
194 kfree(i2c_get_clientdata(client)); 160 kfree(i2c_get_clientdata(client));
195 return 0; 161 return 0;
196} 162}
197 163
164static const struct i2c_device_id pcf8575_id[] = {
165 { "pcf8575", 0 },
166 { }
167};
168
169static struct i2c_driver pcf8575_driver = {
170 .driver = {
171 .owner = THIS_MODULE,
172 .name = "pcf8575",
173 },
174 .probe = pcf8575_probe,
175 .remove = pcf8575_remove,
176 .id_table = pcf8575_id,
177
178 .detect = pcf8575_detect,
179 .address_data = &addr_data,
180};
181
198static int __init pcf8575_init(void) 182static int __init pcf8575_init(void)
199{ 183{
200 return i2c_add_driver(&pcf8575_driver); 184 return i2c_add_driver(&pcf8575_driver);
diff --git a/drivers/i2c/chips/pcf8591.c b/drivers/i2c/chips/pcf8591.c
index db735379f22f..16ce3e193776 100644
--- a/drivers/i2c/chips/pcf8591.c
+++ b/drivers/i2c/chips/pcf8591.c
@@ -72,28 +72,15 @@ MODULE_PARM_DESC(input_mode,
72#define REG_TO_SIGNED(reg) (((reg) & 0x80)?((reg) - 256):(reg)) 72#define REG_TO_SIGNED(reg) (((reg) & 0x80)?((reg) - 256):(reg))
73 73
74struct pcf8591_data { 74struct pcf8591_data {
75 struct i2c_client client;
76 struct mutex update_lock; 75 struct mutex update_lock;
77 76
78 u8 control; 77 u8 control;
79 u8 aout; 78 u8 aout;
80}; 79};
81 80
82static int pcf8591_attach_adapter(struct i2c_adapter *adapter);
83static int pcf8591_detect(struct i2c_adapter *adapter, int address, int kind);
84static int pcf8591_detach_client(struct i2c_client *client);
85static void pcf8591_init_client(struct i2c_client *client); 81static void pcf8591_init_client(struct i2c_client *client);
86static int pcf8591_read_channel(struct device *dev, int channel); 82static int pcf8591_read_channel(struct device *dev, int channel);
87 83
88/* This is the driver that will be inserted */
89static struct i2c_driver pcf8591_driver = {
90 .driver = {
91 .name = "pcf8591",
92 },
93 .attach_adapter = pcf8591_attach_adapter,
94 .detach_client = pcf8591_detach_client,
95};
96
97/* following are the sysfs callback functions */ 84/* following are the sysfs callback functions */
98#define show_in_channel(channel) \ 85#define show_in_channel(channel) \
99static ssize_t show_in##channel##_input(struct device *dev, struct device_attribute *attr, char *buf) \ 86static ssize_t show_in##channel##_input(struct device *dev, struct device_attribute *attr, char *buf) \
@@ -180,58 +167,46 @@ static const struct attribute_group pcf8591_attr_group_opt = {
180/* 167/*
181 * Real code 168 * Real code
182 */ 169 */
183static int pcf8591_attach_adapter(struct i2c_adapter *adapter)
184{
185 return i2c_probe(adapter, &addr_data, pcf8591_detect);
186}
187 170
188/* This function is called by i2c_probe */ 171/* Return 0 if detection is successful, -ENODEV otherwise */
189static int pcf8591_detect(struct i2c_adapter *adapter, int address, int kind) 172static int pcf8591_detect(struct i2c_client *client, int kind,
173 struct i2c_board_info *info)
190{ 174{
191 struct i2c_client *client; 175 struct i2c_adapter *adapter = client->adapter;
192 struct pcf8591_data *data;
193 int err = 0;
194 176
195 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE 177 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE
196 | I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) 178 | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
197 goto exit; 179 return -ENODEV;
180
181 /* Now, we would do the remaining detection. But the PCF8591 is plainly
182 impossible to detect! Stupid chip. */
183
184 strlcpy(info->type, "pcf8591", I2C_NAME_SIZE);
185
186 return 0;
187}
188
189static int pcf8591_probe(struct i2c_client *client,
190 const struct i2c_device_id *id)
191{
192 struct pcf8591_data *data;
193 int err;
198 194
199 /* OK. For now, we presume we have a valid client. We now create the
200 client structure, even though we cannot fill it completely yet. */
201 if (!(data = kzalloc(sizeof(struct pcf8591_data), GFP_KERNEL))) { 195 if (!(data = kzalloc(sizeof(struct pcf8591_data), GFP_KERNEL))) {
202 err = -ENOMEM; 196 err = -ENOMEM;
203 goto exit; 197 goto exit;
204 } 198 }
205 199
206 client = &data->client;
207 i2c_set_clientdata(client, data); 200 i2c_set_clientdata(client, data);
208 client->addr = address;
209 client->adapter = adapter;
210 client->driver = &pcf8591_driver;
211
212 /* Now, we would do the remaining detection. But the PCF8591 is plainly
213 impossible to detect! Stupid chip. */
214
215 /* Determine the chip type - only one kind supported! */
216 if (kind <= 0)
217 kind = pcf8591;
218
219 /* Fill in the remaining client fields and put it into the global
220 list */
221 strlcpy(client->name, "pcf8591", I2C_NAME_SIZE);
222 mutex_init(&data->update_lock); 201 mutex_init(&data->update_lock);
223 202
224 /* Tell the I2C layer a new client has arrived */
225 if ((err = i2c_attach_client(client)))
226 goto exit_kfree;
227
228 /* Initialize the PCF8591 chip */ 203 /* Initialize the PCF8591 chip */
229 pcf8591_init_client(client); 204 pcf8591_init_client(client);
230 205
231 /* Register sysfs hooks */ 206 /* Register sysfs hooks */
232 err = sysfs_create_group(&client->dev.kobj, &pcf8591_attr_group); 207 err = sysfs_create_group(&client->dev.kobj, &pcf8591_attr_group);
233 if (err) 208 if (err)
234 goto exit_detach; 209 goto exit_kfree;
235 210
236 /* Register input2 if not in "two differential inputs" mode */ 211 /* Register input2 if not in "two differential inputs" mode */
237 if (input_mode != 3) { 212 if (input_mode != 3) {
@@ -252,24 +227,16 @@ static int pcf8591_detect(struct i2c_adapter *adapter, int address, int kind)
252exit_sysfs_remove: 227exit_sysfs_remove:
253 sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group_opt); 228 sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group_opt);
254 sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group); 229 sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group);
255exit_detach:
256 i2c_detach_client(client);
257exit_kfree: 230exit_kfree:
258 kfree(data); 231 kfree(data);
259exit: 232exit:
260 return err; 233 return err;
261} 234}
262 235
263static int pcf8591_detach_client(struct i2c_client *client) 236static int pcf8591_remove(struct i2c_client *client)
264{ 237{
265 int err;
266
267 sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group_opt); 238 sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group_opt);
268 sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group); 239 sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group);
269
270 if ((err = i2c_detach_client(client)))
271 return err;
272
273 kfree(i2c_get_clientdata(client)); 240 kfree(i2c_get_clientdata(client));
274 return 0; 241 return 0;
275} 242}
@@ -316,6 +283,25 @@ static int pcf8591_read_channel(struct device *dev, int channel)
316 return (10 * value); 283 return (10 * value);
317} 284}
318 285
286static const struct i2c_device_id pcf8591_id[] = {
287 { "pcf8591", 0 },
288 { }
289};
290MODULE_DEVICE_TABLE(i2c, pcf8591_id);
291
292static struct i2c_driver pcf8591_driver = {
293 .driver = {
294 .name = "pcf8591",
295 },
296 .probe = pcf8591_probe,
297 .remove = pcf8591_remove,
298 .id_table = pcf8591_id,
299
300 .class = I2C_CLASS_HWMON, /* Nearest choice */
301 .detect = pcf8591_detect,
302 .address_data = &addr_data,
303};
304
319static int __init pcf8591_init(void) 305static int __init pcf8591_init(void)
320{ 306{
321 if (input_mode < 0 || input_mode > 3) { 307 if (input_mode < 0 || input_mode > 3) {
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 0a79f7661017..7608df83d6d1 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -654,6 +654,10 @@ int i2c_del_adapter(struct i2c_adapter *adap)
654 654
655 dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); 655 dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
656 656
657 /* Clear the device structure in case this adapter is ever going to be
658 added again */
659 memset(&adap->dev, 0, sizeof(adap->dev));
660
657 out_unlock: 661 out_unlock:
658 mutex_unlock(&core_lock); 662 mutex_unlock(&core_lock);
659 return res; 663 return res;
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index cf707c8f08d4..15b09b89588a 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -98,6 +98,9 @@ if BLK_DEV_IDE
98 98
99comment "Please see Documentation/ide/ide.txt for help/info on IDE drives" 99comment "Please see Documentation/ide/ide.txt for help/info on IDE drives"
100 100
101config IDE_TIMINGS
102 bool
103
101config IDE_ATAPI 104config IDE_ATAPI
102 bool 105 bool
103 106
@@ -326,6 +329,7 @@ config BLK_DEV_PLATFORM
326config BLK_DEV_CMD640 329config BLK_DEV_CMD640
327 tristate "CMD640 chipset bugfix/support" 330 tristate "CMD640 chipset bugfix/support"
328 depends on X86 331 depends on X86
332 select IDE_TIMINGS
329 ---help--- 333 ---help---
330 The CMD-Technologies CMD640 IDE chip is used on many common 486 and 334 The CMD-Technologies CMD640 IDE chip is used on many common 486 and
331 Pentium motherboards, usually in combination with a "Neptune" or 335 Pentium motherboards, usually in combination with a "Neptune" or
@@ -455,6 +459,7 @@ config BLK_DEV_AEC62XX
455 459
456config BLK_DEV_ALI15X3 460config BLK_DEV_ALI15X3
457 tristate "ALI M15x3 chipset support" 461 tristate "ALI M15x3 chipset support"
462 select IDE_TIMINGS
458 select BLK_DEV_IDEDMA_PCI 463 select BLK_DEV_IDEDMA_PCI
459 help 464 help
460 This driver ensures (U)DMA support for ALI 1533, 1543 and 1543C 465 This driver ensures (U)DMA support for ALI 1533, 1543 and 1543C
@@ -469,6 +474,7 @@ config BLK_DEV_ALI15X3
469config BLK_DEV_AMD74XX 474config BLK_DEV_AMD74XX
470 tristate "AMD and nVidia IDE support" 475 tristate "AMD and nVidia IDE support"
471 depends on !ARM 476 depends on !ARM
477 select IDE_TIMINGS
472 select BLK_DEV_IDEDMA_PCI 478 select BLK_DEV_IDEDMA_PCI
473 help 479 help
474 This driver adds explicit support for AMD-7xx and AMD-8111 chips 480 This driver adds explicit support for AMD-7xx and AMD-8111 chips
@@ -489,6 +495,7 @@ config BLK_DEV_ATIIXP
489 495
490config BLK_DEV_CMD64X 496config BLK_DEV_CMD64X
491 tristate "CMD64{3|6|8|9} chipset support" 497 tristate "CMD64{3|6|8|9} chipset support"
498 select IDE_TIMINGS
492 select BLK_DEV_IDEDMA_PCI 499 select BLK_DEV_IDEDMA_PCI
493 help 500 help
494 Say Y here if you have an IDE controller which uses any of these 501 Say Y here if you have an IDE controller which uses any of these
@@ -503,6 +510,7 @@ config BLK_DEV_TRIFLEX
503 510
504config BLK_DEV_CY82C693 511config BLK_DEV_CY82C693
505 tristate "CY82C693 chipset support" 512 tristate "CY82C693 chipset support"
513 select IDE_TIMINGS
506 select BLK_DEV_IDEDMA_PCI 514 select BLK_DEV_IDEDMA_PCI
507 help 515 help
508 This driver adds detection and support for the CY82C693 chipset 516 This driver adds detection and support for the CY82C693 chipset
@@ -695,6 +703,7 @@ config BLK_DEV_SIS5513
695config BLK_DEV_SL82C105 703config BLK_DEV_SL82C105
696 tristate "Winbond SL82c105 support" 704 tristate "Winbond SL82c105 support"
697 depends on (PPC || ARM) 705 depends on (PPC || ARM)
706 select IDE_TIMINGS
698 select BLK_DEV_IDEDMA_PCI 707 select BLK_DEV_IDEDMA_PCI
699 help 708 help
700 If you have a Winbond SL82c105 IDE controller, say Y here to enable 709 If you have a Winbond SL82c105 IDE controller, say Y here to enable
@@ -725,6 +734,7 @@ config BLK_DEV_TRM290
725 734
726config BLK_DEV_VIA82CXXX 735config BLK_DEV_VIA82CXXX
727 tristate "VIA82CXXX chipset support" 736 tristate "VIA82CXXX chipset support"
737 select IDE_TIMINGS
728 select BLK_DEV_IDEDMA_PCI 738 select BLK_DEV_IDEDMA_PCI
729 help 739 help
730 This driver adds explicit support for VIA BusMastering IDE chips. 740 This driver adds explicit support for VIA BusMastering IDE chips.
@@ -751,6 +761,7 @@ endif
751config BLK_DEV_IDE_PMAC 761config BLK_DEV_IDE_PMAC
752 tristate "PowerMac on-board IDE support" 762 tristate "PowerMac on-board IDE support"
753 depends on PPC_PMAC && IDE=y && BLK_DEV_IDE=y 763 depends on PPC_PMAC && IDE=y && BLK_DEV_IDE=y
764 select IDE_TIMINGS
754 help 765 help
755 This driver provides support for the on-board IDE controller on 766 This driver provides support for the on-board IDE controller on
756 most of the recent Apple Power Macintoshes and PowerBooks. 767 most of the recent Apple Power Macintoshes and PowerBooks.
@@ -829,13 +840,6 @@ config BLK_DEV_IDE_RAPIDE
829 Say Y here if you want to support the Yellowstone RapIDE controller 840 Say Y here if you want to support the Yellowstone RapIDE controller
830 manufactured for use with Acorn computers. 841 manufactured for use with Acorn computers.
831 842
832config BLK_DEV_IDE_BAST
833 tristate "Simtec BAST / Thorcom VR1000 IDE support"
834 depends on ARM && (ARCH_BAST || MACH_VR1000)
835 help
836 Say Y here if you want to support the onboard IDE channels on the
837 Simtec BAST or the Thorcom VR1000
838
839config IDE_H8300 843config IDE_H8300
840 tristate "H8300 IDE support" 844 tristate "H8300 IDE support"
841 depends on H8300 845 depends on H8300
@@ -919,51 +923,12 @@ config BLK_DEV_Q40IDE
919config BLK_DEV_PALMCHIP_BK3710 923config BLK_DEV_PALMCHIP_BK3710
920 tristate "Palmchip bk3710 IDE controller support" 924 tristate "Palmchip bk3710 IDE controller support"
921 depends on ARCH_DAVINCI 925 depends on ARCH_DAVINCI
926 select IDE_TIMINGS
922 select BLK_DEV_IDEDMA_SFF 927 select BLK_DEV_IDEDMA_SFF
923 help 928 help
924 Say Y here if you want to support the onchip IDE controller on the 929 Say Y here if you want to support the onchip IDE controller on the
925 TI DaVinci SoC 930 TI DaVinci SoC
926 931
927
928config BLK_DEV_MPC8xx_IDE
929 tristate "MPC8xx IDE support"
930 depends on 8xx && (LWMON || IVMS8 || IVML24 || TQM8xxL) && IDE=y && BLK_DEV_IDE=y && !PPC_MERGE
931 help
932 This option provides support for IDE on Motorola MPC8xx Systems.
933 Please see 'Type of MPC8xx IDE interface' for details.
934
935 If unsure, say N.
936
937choice
938 prompt "Type of MPC8xx IDE interface"
939 depends on BLK_DEV_MPC8xx_IDE
940 default IDE_8xx_PCCARD
941
942config IDE_8xx_PCCARD
943 bool "8xx_PCCARD"
944 ---help---
945 Select how the IDE devices are connected to the MPC8xx system:
946
947 8xx_PCCARD uses the 8xx internal PCMCIA interface in combination
948 with a PC Card (e.g. ARGOSY portable Hard Disk Adapter),
949 ATA PC Card HDDs or ATA PC Flash Cards (example: TQM8xxL
950 systems)
951
952 8xx_DIRECT is used for directly connected IDE devices using the 8xx
953 internal PCMCIA interface (example: IVMS8 systems)
954
955 EXT_DIRECT is used for IDE devices directly connected to the 8xx
956 bus using some glue logic, but _not_ the 8xx internal
957 PCMCIA interface (example: IDIF860 systems)
958
959config IDE_8xx_DIRECT
960 bool "8xx_DIRECT"
961
962config IDE_EXT_DIRECT
963 bool "EXT_DIRECT"
964
965endchoice
966
967# no isa -> no vlb 932# no isa -> no vlb
968if ISA && (ALPHA || X86 || MIPS) 933if ISA && (ALPHA || X86 || MIPS)
969 934
@@ -981,6 +946,7 @@ config BLK_DEV_4DRIVES
981 946
982config BLK_DEV_ALI14XX 947config BLK_DEV_ALI14XX
983 tristate "ALI M14xx support" 948 tristate "ALI M14xx support"
949 select IDE_TIMINGS
984 help 950 help
985 This driver is enabled at runtime using the "ali14xx.probe" kernel 951 This driver is enabled at runtime using the "ali14xx.probe" kernel
986 boot parameter. It enables support for the secondary IDE interface 952 boot parameter. It enables support for the secondary IDE interface
@@ -1000,6 +966,7 @@ config BLK_DEV_DTC2278
1000 966
1001config BLK_DEV_HT6560B 967config BLK_DEV_HT6560B
1002 tristate "Holtek HT6560B support" 968 tristate "Holtek HT6560B support"
969 select IDE_TIMINGS
1003 help 970 help
1004 This driver is enabled at runtime using the "ht6560b.probe" kernel 971 This driver is enabled at runtime using the "ht6560b.probe" kernel
1005 boot parameter. It enables support for the secondary IDE interface 972 boot parameter. It enables support for the secondary IDE interface
@@ -1009,6 +976,7 @@ config BLK_DEV_HT6560B
1009 976
1010config BLK_DEV_QD65XX 977config BLK_DEV_QD65XX
1011 tristate "QDI QD65xx support" 978 tristate "QDI QD65xx support"
979 select IDE_TIMINGS
1012 help 980 help
1013 This driver is enabled at runtime using the "qd65xx.probe" kernel 981 This driver is enabled at runtime using the "qd65xx.probe" kernel
1014 boot parameter. It permits faster I/O speeds to be set. See the 982 boot parameter. It permits faster I/O speeds to be set. See the
@@ -1032,30 +1000,4 @@ config BLK_DEV_IDEDMA
1032 1000
1033endif 1001endif
1034 1002
1035config BLK_DEV_HD_ONLY
1036 bool "Old hard disk (MFM/RLL/IDE) driver"
1037 depends on !ARM || ARCH_RPC || ARCH_SHARK || BROKEN
1038 help
1039 There are two drivers for MFM/RLL/IDE hard disks. Most people use
1040 the newer enhanced driver, but this old one is still around for two
1041 reasons. Some older systems have strange timing problems and seem to
1042 work only with the old driver (which itself does not work with some
1043 newer systems). The other reason is that the old driver is smaller,
1044 since it lacks the enhanced functionality of the new one. This makes
1045 it a good choice for systems with very tight memory restrictions, or
1046 for systems with only older MFM/RLL/ESDI drives. Choosing the old
1047 driver can save 13 KB or so of kernel memory.
1048
1049 If you want to use this driver together with the new one you have
1050 to use "hda=noprobe hdb=noprobe" kernel parameters to prevent the new
1051 driver from probing the primary interface.
1052
1053 If you are unsure, then just choose the Enhanced IDE/MFM/RLL driver
1054 instead of this one. For more detailed information, read the
1055 Disk-HOWTO, available from
1056 <http://www.tldp.org/docs.html#howto>.
1057
1058config BLK_DEV_HD
1059 def_bool BLK_DEV_HD_ONLY
1060
1061endif # IDE 1003endif # IDE
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index a2b3f84d710d..5d414e301a5a 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -11,9 +11,11 @@
11 11
12EXTRA_CFLAGS += -Idrivers/ide 12EXTRA_CFLAGS += -Idrivers/ide
13 13
14ide-core-y += ide.o ide-io.o ide-iops.o ide-lib.o ide-probe.o ide-taskfile.o 14ide-core-y += ide.o ide-io.o ide-iops.o ide-lib.o ide-probe.o ide-taskfile.o \
15 ide-pio-blacklist.o
15 16
16# core IDE code 17# core IDE code
18ide-core-$(CONFIG_IDE_TIMINGS) += ide-timings.o
17ide-core-$(CONFIG_IDE_ATAPI) += ide-atapi.o 19ide-core-$(CONFIG_IDE_ATAPI) += ide-atapi.o
18ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o 20ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o
19ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o 21ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o
@@ -59,9 +61,3 @@ ifeq ($(CONFIG_BLK_DEV_PLATFORM), y)
59endif 61endif
60 62
61obj-$(CONFIG_BLK_DEV_IDE) += arm/ mips/ 63obj-$(CONFIG_BLK_DEV_IDE) += arm/ mips/
62
63# old hd driver must be last
64ifeq ($(CONFIG_BLK_DEV_HD), y)
65 hd-core-y += legacy/hd.o
66 obj-y += hd-core.o
67endif
diff --git a/drivers/ide/arm/Makefile b/drivers/ide/arm/Makefile
index 936e7b0237f5..5bc26053afa6 100644
--- a/drivers/ide/arm/Makefile
+++ b/drivers/ide/arm/Makefile
@@ -1,7 +1,6 @@
1 1
2obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o 2obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
3obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o 3obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o
4obj-$(CONFIG_BLK_DEV_IDE_BAST) += bast-ide.o
5obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o 4obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o
6 5
7ifeq ($(CONFIG_IDE_ARM), m) 6ifeq ($(CONFIG_IDE_ARM), m)
diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c
deleted file mode 100644
index 8e8c28104b45..000000000000
--- a/drivers/ide/arm/bast-ide.c
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * Copyright (c) 2003-2004 Simtec Electronics
3 * Ben Dooks <ben@simtec.co.uk>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9*/
10
11#include <linux/module.h>
12#include <linux/errno.h>
13#include <linux/ide.h>
14#include <linux/init.h>
15
16#include <asm/mach-types.h>
17
18#include <asm/io.h>
19#include <asm/irq.h>
20#include <asm/arch/map.h>
21#include <asm/arch/bast-map.h>
22#include <asm/arch/bast-irq.h>
23
24#define DRV_NAME "bast-ide"
25
26static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
27{
28 ide_hwif_t *hwif;
29 hw_regs_t hw;
30 int i;
31 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
32
33 memset(&hw, 0, sizeof(hw));
34
35 base += BAST_IDE_CS;
36 aux += BAST_IDE_CS;
37
38 for (i = 0; i <= 7; i++) {
39 hw.io_ports_array[i] = (unsigned long)base;
40 base += 0x20;
41 }
42
43 hw.io_ports.ctl_addr = aux + (6 * 0x20);
44 hw.irq = irq;
45 hw.chipset = ide_generic;
46
47 hwif = ide_find_port();
48 if (hwif == NULL)
49 goto out;
50
51 i = hwif->index;
52
53 ide_init_port_data(hwif, i);
54 ide_init_port_hw(hwif, &hw);
55 hwif->port_ops = NULL;
56
57 idx[0] = i;
58
59 ide_device_add(idx, NULL);
60out:
61 return 0;
62}
63
64static int __init bastide_init(void)
65{
66 unsigned long base = BAST_VA_IDEPRI + BAST_IDE_CS;
67
68 /* we can treat the VR1000 and the BAST the same */
69
70 if (!(machine_is_bast() || machine_is_vr1000()))
71 return 0;
72
73 printk("BAST: IDE driver, (c) 2003-2004 Simtec Electronics\n");
74
75 if (!request_mem_region(base, 0x400000, DRV_NAME)) {
76 printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
77 return -EBUSY;
78 }
79
80 bastide_register(BAST_VA_IDEPRI, BAST_VA_IDEPRIAUX, IRQ_IDE0);
81 bastide_register(BAST_VA_IDESEC, BAST_VA_IDESECAUX, IRQ_IDE1);
82
83 return 0;
84}
85
86module_init(bastide_init);
87
88MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
89MODULE_LICENSE("GPL");
90MODULE_DESCRIPTION("Simtec BAST / Thorcom VR1000 IDE driver");
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index 061456914ca3..52f58c885783 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -21,6 +21,8 @@
21#include <asm/dma.h> 21#include <asm/dma.h>
22#include <asm/ecard.h> 22#include <asm/ecard.h>
23 23
24#define DRV_NAME "icside"
25
24#define ICS_IDENT_OFFSET 0x2280 26#define ICS_IDENT_OFFSET 0x2280
25 27
26#define ICS_ARCIN_V5_INTRSTAT 0x0000 28#define ICS_ARCIN_V5_INTRSTAT 0x0000
@@ -68,6 +70,7 @@ struct icside_state {
68 unsigned int enabled; 70 unsigned int enabled;
69 void __iomem *irq_port; 71 void __iomem *irq_port;
70 void __iomem *ioc_base; 72 void __iomem *ioc_base;
73 unsigned int sel;
71 unsigned int type; 74 unsigned int type;
72 ide_hwif_t *hwif[2]; 75 ide_hwif_t *hwif[2];
73}; 76};
@@ -165,7 +168,8 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = {
165static void icside_maskproc(ide_drive_t *drive, int mask) 168static void icside_maskproc(ide_drive_t *drive, int mask)
166{ 169{
167 ide_hwif_t *hwif = HWIF(drive); 170 ide_hwif_t *hwif = HWIF(drive);
168 struct icside_state *state = hwif->hwif_data; 171 struct expansion_card *ec = ECARD_DEV(hwif->dev);
172 struct icside_state *state = ecard_get_drvdata(ec);
169 unsigned long flags; 173 unsigned long flags;
170 174
171 local_irq_save(flags); 175 local_irq_save(flags);
@@ -308,6 +312,7 @@ static int icside_dma_setup(ide_drive_t *drive)
308{ 312{
309 ide_hwif_t *hwif = HWIF(drive); 313 ide_hwif_t *hwif = HWIF(drive);
310 struct expansion_card *ec = ECARD_DEV(hwif->dev); 314 struct expansion_card *ec = ECARD_DEV(hwif->dev);
315 struct icside_state *state = ecard_get_drvdata(ec);
311 struct request *rq = hwif->hwgroup->rq; 316 struct request *rq = hwif->hwgroup->rq;
312 unsigned int dma_mode; 317 unsigned int dma_mode;
313 318
@@ -331,7 +336,7 @@ static int icside_dma_setup(ide_drive_t *drive)
331 /* 336 /*
332 * Route the DMA signals to the correct interface. 337 * Route the DMA signals to the correct interface.
333 */ 338 */
334 writeb(hwif->select_data, hwif->config_data); 339 writeb(state->sel | hwif->channel, state->ioc_base);
335 340
336 /* 341 /*
337 * Select the correct timing for this drive. 342 * Select the correct timing for this drive.
@@ -359,7 +364,8 @@ static void icside_dma_exec_cmd(ide_drive_t *drive, u8 cmd)
359static int icside_dma_test_irq(ide_drive_t *drive) 364static int icside_dma_test_irq(ide_drive_t *drive)
360{ 365{
361 ide_hwif_t *hwif = HWIF(drive); 366 ide_hwif_t *hwif = HWIF(drive);
362 struct icside_state *state = hwif->hwif_data; 367 struct expansion_card *ec = ECARD_DEV(hwif->dev);
368 struct icside_state *state = ecard_get_drvdata(ec);
363 369
364 return readb(state->irq_port + 370 return readb(state->irq_port +
365 (hwif->channel ? 371 (hwif->channel ?
@@ -411,36 +417,24 @@ static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
411 return -EOPNOTSUPP; 417 return -EOPNOTSUPP;
412} 418}
413 419
414static ide_hwif_t * 420static void icside_setup_ports(hw_regs_t *hw, void __iomem *base,
415icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *ec) 421 struct cardinfo *info, struct expansion_card *ec)
416{ 422{
417 unsigned long port = (unsigned long)base + info->dataoffset; 423 unsigned long port = (unsigned long)base + info->dataoffset;
418 ide_hwif_t *hwif;
419 424
420 hwif = ide_find_port(); 425 hw->io_ports.data_addr = port;
421 if (hwif) { 426 hw->io_ports.error_addr = port + (1 << info->stepping);
422 /* 427 hw->io_ports.nsect_addr = port + (2 << info->stepping);
423 * Ensure we're using MMIO 428 hw->io_ports.lbal_addr = port + (3 << info->stepping);
424 */ 429 hw->io_ports.lbam_addr = port + (4 << info->stepping);
425 default_hwif_mmiops(hwif); 430 hw->io_ports.lbah_addr = port + (5 << info->stepping);
426 431 hw->io_ports.device_addr = port + (6 << info->stepping);
427 hwif->io_ports.data_addr = port; 432 hw->io_ports.status_addr = port + (7 << info->stepping);
428 hwif->io_ports.error_addr = port + (1 << info->stepping); 433 hw->io_ports.ctl_addr = (unsigned long)base + info->ctrloffset;
429 hwif->io_ports.nsect_addr = port + (2 << info->stepping); 434
430 hwif->io_ports.lbal_addr = port + (3 << info->stepping); 435 hw->irq = ec->irq;
431 hwif->io_ports.lbam_addr = port + (4 << info->stepping); 436 hw->dev = &ec->dev;
432 hwif->io_ports.lbah_addr = port + (5 << info->stepping); 437 hw->chipset = ide_acorn;
433 hwif->io_ports.device_addr = port + (6 << info->stepping);
434 hwif->io_ports.status_addr = port + (7 << info->stepping);
435 hwif->io_ports.ctl_addr =
436 (unsigned long)base + info->ctrloffset;
437 hwif->irq = ec->irq;
438 hwif->chipset = ide_acorn;
439 hwif->gendev.parent = &ec->dev;
440 hwif->dev = &ec->dev;
441 }
442
443 return hwif;
444} 438}
445 439
446static int __init 440static int __init
@@ -449,6 +443,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
449 ide_hwif_t *hwif; 443 ide_hwif_t *hwif;
450 void __iomem *base; 444 void __iomem *base;
451 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 445 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
446 hw_regs_t hw;
452 447
453 base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); 448 base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
454 if (!base) 449 if (!base)
@@ -466,12 +461,19 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
466 */ 461 */
467 icside_irqdisable_arcin_v5(ec, 0); 462 icside_irqdisable_arcin_v5(ec, 0);
468 463
469 hwif = icside_setup(base, &icside_cardinfo_v5, ec); 464 icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);
465
466 hwif = ide_find_port();
470 if (!hwif) 467 if (!hwif)
471 return -ENODEV; 468 return -ENODEV;
472 469
470 ide_init_port_hw(hwif, &hw);
471 default_hwif_mmiops(hwif);
472
473 state->hwif[0] = hwif; 473 state->hwif[0] = hwif;
474 474
475 ecard_set_drvdata(ec, state);
476
475 idx[0] = hwif->index; 477 idx[0] = hwif->index;
476 478
477 ide_device_add(idx, NULL); 479 ide_device_add(idx, NULL);
@@ -497,6 +499,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
497 int ret; 499 int ret;
498 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 500 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
499 struct ide_port_info d = icside_v6_port_info; 501 struct ide_port_info d = icside_v6_port_info;
502 hw_regs_t hw[2];
500 503
501 ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); 504 ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
502 if (!ioc_base) { 505 if (!ioc_base) {
@@ -525,43 +528,47 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
525 528
526 state->irq_port = easi_base; 529 state->irq_port = easi_base;
527 state->ioc_base = ioc_base; 530 state->ioc_base = ioc_base;
531 state->sel = sel;
528 532
529 /* 533 /*
530 * Be on the safe side - disable interrupts 534 * Be on the safe side - disable interrupts
531 */ 535 */
532 icside_irqdisable_arcin_v6(ec, 0); 536 icside_irqdisable_arcin_v6(ec, 0);
533 537
538 icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
539 icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);
540
534 /* 541 /*
535 * Find and register the interfaces. 542 * Find and register the interfaces.
536 */ 543 */
537 hwif = icside_setup(easi_base, &icside_cardinfo_v6_1, ec); 544 hwif = ide_find_port();
538 mate = icside_setup(easi_base, &icside_cardinfo_v6_2, ec); 545 if (hwif == NULL)
546 return -ENODEV;
539 547
540 if (!hwif || !mate) { 548 ide_init_port_hw(hwif, &hw[0]);
541 ret = -ENODEV; 549 default_hwif_mmiops(hwif);
542 goto out; 550
551 idx[0] = hwif->index;
552
553 mate = ide_find_port();
554 if (mate) {
555 ide_init_port_hw(mate, &hw[1]);
556 default_hwif_mmiops(mate);
557
558 idx[1] = mate->index;
543 } 559 }
544 560
545 state->hwif[0] = hwif; 561 state->hwif[0] = hwif;
546 state->hwif[1] = mate; 562 state->hwif[1] = mate;
547 563
548 hwif->hwif_data = state; 564 ecard_set_drvdata(ec, state);
549 hwif->config_data = (unsigned long)ioc_base;
550 hwif->select_data = sel;
551
552 mate->hwif_data = state;
553 mate->config_data = (unsigned long)ioc_base;
554 mate->select_data = sel | 1;
555 565
556 if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) { 566 if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
557 d.init_dma = icside_dma_init; 567 d.init_dma = icside_dma_init;
558 d.port_ops = &icside_v6_port_ops; 568 d.port_ops = &icside_v6_port_ops;
559 d.dma_ops = NULL; 569 d.dma_ops = NULL;
560 } 570 }
561 571
562 idx[0] = hwif->index;
563 idx[1] = mate->index;
564
565 ide_device_add(idx, &d); 572 ide_device_add(idx, &d);
566 573
567 return 0; 574 return 0;
@@ -627,10 +634,8 @@ icside_probe(struct expansion_card *ec, const struct ecard_id *id)
627 break; 634 break;
628 } 635 }
629 636
630 if (ret == 0) { 637 if (ret == 0)
631 ecard_set_drvdata(ec, state);
632 goto out; 638 goto out;
633 }
634 639
635 kfree(state); 640 kfree(state);
636 release: 641 release:
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index 3839f5722985..c79b85b6e4a3 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -74,8 +74,6 @@ struct palm_bk3710_udmatiming {
74#define BK3710_IORDYTMP 0x78 74#define BK3710_IORDYTMP 0x78
75#define BK3710_IORDYTMS 0x7C 75#define BK3710_IORDYTMS 0x7C
76 76
77#include "../ide-timing.h"
78
79static unsigned ideclk_period; /* in nanoseconds */ 77static unsigned ideclk_period; /* in nanoseconds */
80 78
81static const struct palm_bk3710_udmatiming palm_bk3710_udmatimings[6] = { 79static const struct palm_bk3710_udmatiming palm_bk3710_udmatimings[6] = {
@@ -402,7 +400,6 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
402 400
403 i = hwif->index; 401 i = hwif->index;
404 402
405 ide_init_port_data(hwif, i);
406 ide_init_port_hw(hwif, &hw); 403 ide_init_port_hw(hwif, &hw);
407 404
408 default_hwif_mmiops(hwif); 405 default_hwif_mmiops(hwif);
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index 1747b2358775..43057e0303c8 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -11,6 +11,10 @@
11 11
12#include <asm/ecard.h> 12#include <asm/ecard.h>
13 13
14static struct const ide_port_info rapide_port_info = {
15 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
16};
17
14static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base, 18static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
15 void __iomem *ctrl, unsigned int sz, int irq) 19 void __iomem *ctrl, unsigned int sz, int irq)
16{ 20{
@@ -44,25 +48,26 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
44 goto release; 48 goto release;
45 } 49 }
46 50
47 hwif = ide_find_port(); 51 memset(&hw, 0, sizeof(hw));
48 if (hwif) { 52 rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq);
49 memset(&hw, 0, sizeof(hw)); 53 hw.chipset = ide_generic;
50 rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq); 54 hw.dev = &ec->dev;
51 hw.chipset = ide_generic;
52 hw.dev = &ec->dev;
53 55
54 ide_init_port_hw(hwif, &hw); 56 hwif = ide_find_port();
57 if (hwif == NULL) {
58 ret = -ENOENT;
59 goto release;
60 }
55 61
56 hwif->host_flags = IDE_HFLAG_MMIO; 62 ide_init_port_hw(hwif, &hw);
57 default_hwif_mmiops(hwif); 63 default_hwif_mmiops(hwif);
58 64
59 idx[0] = hwif->index; 65 idx[0] = hwif->index;
60 66
61 ide_device_add(idx, NULL); 67 ide_device_add(idx, &rapide_port_info);
62 68
63 ecard_set_drvdata(ec, hwif); 69 ecard_set_drvdata(ec, hwif);
64 goto out; 70 goto out;
65 }
66 71
67 release: 72 release:
68 ecard_release_resources(ec); 73 ecard_release_resources(ec);
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index ae37ee58bae2..20fad6d542cc 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/h8300/ide-h8300.c
@@ -8,6 +8,8 @@
8#include <asm/io.h> 8#include <asm/io.h>
9#include <asm/irq.h> 9#include <asm/irq.h>
10 10
11#define DRV_NAME "ide-h8300"
12
11#define bswap(d) \ 13#define bswap(d) \
12({ \ 14({ \
13 u16 r; \ 15 u16 r; \
@@ -176,6 +178,10 @@ static inline void hwif_setup(ide_hwif_t *hwif)
176 hwif->output_data = h8300_output_data; 178 hwif->output_data = h8300_output_data;
177} 179}
178 180
181static const struct ide_port_info h8300_port_info = {
182 .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
183};
184
179static int __init h8300_ide_init(void) 185static int __init h8300_ide_init(void)
180{ 186{
181 hw_regs_t hw; 187 hw_regs_t hw;
@@ -183,6 +189,8 @@ static int __init h8300_ide_init(void)
183 int index; 189 int index;
184 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 190 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
185 191
192 printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n");
193
186 if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300")) 194 if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300"))
187 goto out_busy; 195 goto out_busy;
188 if (!request_region(CONFIG_H8300_IDE_ALT, H8300_IDE_GAP, "ide-h8300")) { 196 if (!request_region(CONFIG_H8300_IDE_ALT, H8300_IDE_GAP, "ide-h8300")) {
@@ -192,22 +200,17 @@ static int __init h8300_ide_init(void)
192 200
193 hw_setup(&hw); 201 hw_setup(&hw);
194 202
195 hwif = ide_find_port(); 203 hwif = ide_find_port_slot(&h8300_port_info);
196 if (hwif == NULL) { 204 if (hwif == NULL)
197 printk(KERN_ERR "ide-h8300: IDE I/F register failed\n");
198 return -ENOENT; 205 return -ENOENT;
199 }
200 206
201 index = hwif->index; 207 index = hwif->index;
202 ide_init_port_data(hwif, index);
203 ide_init_port_hw(hwif, &hw); 208 ide_init_port_hw(hwif, &hw);
204 hwif_setup(hwif); 209 hwif_setup(hwif);
205 hwif->host_flags = IDE_HFLAG_NO_IO_32BIT;
206 printk(KERN_INFO "ide%d: H8/300 generic IDE interface\n", index);
207 210
208 idx[0] = index; 211 idx[0] = index;
209 212
210 ide_device_add(idx, NULL); 213 ide_device_add(idx, &h8300_port_info);
211 214
212 return 0; 215 return 0;
213 216
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index d99847157186..6e29dd532090 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -517,14 +517,9 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
517 int xferlen, 517 int xferlen,
518 ide_handler_t *handler) 518 ide_handler_t *handler)
519{ 519{
520 ide_startstop_t startstop;
521 struct cdrom_info *info = drive->driver_data; 520 struct cdrom_info *info = drive->driver_data;
522 ide_hwif_t *hwif = drive->hwif; 521 ide_hwif_t *hwif = drive->hwif;
523 522
524 /* wait for the controller to be idle */
525 if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY))
526 return startstop;
527
528 /* FIXME: for Virtual DMA we must check harder */ 523 /* FIXME: for Virtual DMA we must check harder */
529 if (info->dma) 524 if (info->dma)
530 info->dma = !hwif->dma_ops->dma_setup(drive); 525 info->dma = !hwif->dma_ops->dma_setup(drive);
@@ -604,28 +599,6 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
604} 599}
605 600
606/* 601/*
607 * Block read functions.
608 */
609static void ide_cd_pad_transfer(ide_drive_t *drive, xfer_func_t *xf, int len)
610{
611 while (len > 0) {
612 int dum = 0;
613 xf(drive, NULL, &dum, sizeof(dum));
614 len -= sizeof(dum);
615 }
616}
617
618static void ide_cd_drain_data(ide_drive_t *drive, int nsects)
619{
620 while (nsects > 0) {
621 static char dum[SECTOR_SIZE];
622
623 drive->hwif->input_data(drive, NULL, dum, sizeof(dum));
624 nsects--;
625 }
626}
627
628/*
629 * Check the contents of the interrupt reason register from the cdrom 602 * Check the contents of the interrupt reason register from the cdrom
630 * and attempt to recover if there are problems. Returns 0 if everything's 603 * and attempt to recover if there are problems. Returns 0 if everything's
631 * ok; nonzero if the request has been terminated. 604 * ok; nonzero if the request has been terminated.
@@ -640,15 +613,12 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
640 if (ireason == (!rw << 1)) 613 if (ireason == (!rw << 1))
641 return 0; 614 return 0;
642 else if (ireason == (rw << 1)) { 615 else if (ireason == (rw << 1)) {
643 ide_hwif_t *hwif = drive->hwif;
644 xfer_func_t *xf;
645 616
646 /* whoops... */ 617 /* whoops... */
647 printk(KERN_ERR "%s: %s: wrong transfer direction!\n", 618 printk(KERN_ERR "%s: %s: wrong transfer direction!\n",
648 drive->name, __func__); 619 drive->name, __func__);
649 620
650 xf = rw ? hwif->output_data : hwif->input_data; 621 ide_pad_transfer(drive, rw, len);
651 ide_cd_pad_transfer(drive, xf, len);
652 } else if (rw == 0 && ireason == 1) { 622 } else if (rw == 0 && ireason == 1) {
653 /* 623 /*
654 * Some drives (ASUS) seem to tell us that status info is 624 * Some drives (ASUS) seem to tell us that status info is
@@ -696,16 +666,9 @@ static int ide_cd_check_transfer_size(ide_drive_t *drive, int len)
696 666
697static ide_startstop_t cdrom_newpc_intr(ide_drive_t *); 667static ide_startstop_t cdrom_newpc_intr(ide_drive_t *);
698 668
699/* 669static ide_startstop_t ide_cd_prepare_rw_request(ide_drive_t *drive,
700 * Routine to send a read/write packet command to the drive. This is usually 670 struct request *rq)
701 * called directly from cdrom_start_{read,write}(). However, for drq_interrupt
702 * devices, it is called from an interrupt when the drive is ready to accept
703 * the command.
704 */
705static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
706{ 671{
707 struct request *rq = HWGROUP(drive)->rq;
708
709 if (rq_data_dir(rq) == READ) { 672 if (rq_data_dir(rq) == READ) {
710 unsigned short sectors_per_frame = 673 unsigned short sectors_per_frame =
711 queue_hardsect_size(drive->queue) >> SECTOR_BITS; 674 queue_hardsect_size(drive->queue) >> SECTOR_BITS;
@@ -742,6 +705,19 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
742 /* set up the command */ 705 /* set up the command */
743 rq->timeout = ATAPI_WAIT_PC; 706 rq->timeout = ATAPI_WAIT_PC;
744 707
708 return ide_started;
709}
710
711/*
712 * Routine to send a read/write packet command to the drive. This is usually
713 * called directly from cdrom_start_{read,write}(). However, for drq_interrupt
714 * devices, it is called from an interrupt when the drive is ready to accept
715 * the command.
716 */
717static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
718{
719 struct request *rq = drive->hwif->hwgroup->rq;
720
745 /* send the command to the drive and return */ 721 /* send the command to the drive and return */
746 return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr); 722 return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr);
747} 723}
@@ -768,9 +744,8 @@ static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive)
768 return ide_stopped; 744 return ide_stopped;
769} 745}
770 746
771static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive) 747static void ide_cd_prepare_seek_request(ide_drive_t *drive, struct request *rq)
772{ 748{
773 struct request *rq = HWGROUP(drive)->rq;
774 sector_t frame = rq->sector; 749 sector_t frame = rq->sector;
775 750
776 sector_div(frame, queue_hardsect_size(drive->queue) >> SECTOR_BITS); 751 sector_div(frame, queue_hardsect_size(drive->queue) >> SECTOR_BITS);
@@ -780,17 +755,13 @@ static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive)
780 put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]); 755 put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]);
781 756
782 rq->timeout = ATAPI_WAIT_PC; 757 rq->timeout = ATAPI_WAIT_PC;
783 return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr);
784} 758}
785 759
786static ide_startstop_t cdrom_start_seek(ide_drive_t *drive, unsigned int block) 760static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive)
787{ 761{
788 struct cdrom_info *info = drive->driver_data; 762 struct request *rq = drive->hwif->hwgroup->rq;
789 763
790 info->dma = 0; 764 return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr);
791 info->start_seek = jiffies;
792 return cdrom_start_packet_command(drive, 0,
793 cdrom_start_seek_continuation);
794} 765}
795 766
796/* 767/*
@@ -1011,7 +982,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1011 - bio_cur_sectors(rq->bio), 982 - bio_cur_sectors(rq->bio),
1012 thislen >> 9); 983 thislen >> 9);
1013 if (nskip > 0) { 984 if (nskip > 0) {
1014 ide_cd_drain_data(drive, nskip); 985 ide_pad_transfer(drive, write, nskip << 9);
1015 rq->current_nr_sectors -= nskip; 986 rq->current_nr_sectors -= nskip;
1016 thislen -= (nskip << 9); 987 thislen -= (nskip << 9);
1017 } 988 }
@@ -1048,7 +1019,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1048 * If the buffers are full, pipe the rest into 1019 * If the buffers are full, pipe the rest into
1049 * oblivion. 1020 * oblivion.
1050 */ 1021 */
1051 ide_cd_drain_data(drive, thislen >> 9); 1022 ide_pad_transfer(drive, 0, thislen);
1052 else { 1023 else {
1053 printk(KERN_ERR "%s: confused, missing data\n", 1024 printk(KERN_ERR "%s: confused, missing data\n",
1054 drive->name); 1025 drive->name);
@@ -1096,7 +1067,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1096 1067
1097 /* pad, if necessary */ 1068 /* pad, if necessary */
1098 if (!blk_fs_request(rq) && len > 0) 1069 if (!blk_fs_request(rq) && len > 0)
1099 ide_cd_pad_transfer(drive, xferfunc, len); 1070 ide_pad_transfer(drive, write, len);
1100 1071
1101 if (blk_pc_request(rq)) { 1072 if (blk_pc_request(rq)) {
1102 timeout = rq->timeout; 1073 timeout = rq->timeout;
@@ -1165,21 +1136,17 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
1165 if (write) 1136 if (write)
1166 cd->devinfo.media_written = 1; 1137 cd->devinfo.media_written = 1;
1167 1138
1168 /* start sending the read/write request to the drive */ 1139 return ide_started;
1169 return cdrom_start_packet_command(drive, 32768, cdrom_start_rw_cont);
1170} 1140}
1171 1141
1172static ide_startstop_t cdrom_do_newpc_cont(ide_drive_t *drive) 1142static ide_startstop_t cdrom_do_newpc_cont(ide_drive_t *drive)
1173{ 1143{
1174 struct request *rq = HWGROUP(drive)->rq; 1144 struct request *rq = HWGROUP(drive)->rq;
1175 1145
1176 if (!rq->timeout)
1177 rq->timeout = ATAPI_WAIT_PC;
1178
1179 return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr); 1146 return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr);
1180} 1147}
1181 1148
1182static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) 1149static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
1183{ 1150{
1184 struct cdrom_info *info = drive->driver_data; 1151 struct cdrom_info *info = drive->driver_data;
1185 1152
@@ -1191,10 +1158,16 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
1191 info->dma = 0; 1158 info->dma = 0;
1192 1159
1193 /* sg request */ 1160 /* sg request */
1194 if (rq->bio) { 1161 if (rq->bio || ((rq->cmd_type == REQ_TYPE_ATA_PC) && rq->data_len)) {
1195 int mask = drive->queue->dma_alignment; 1162 struct request_queue *q = drive->queue;
1196 unsigned long addr = 1163 unsigned int alignment;
1197 (unsigned long)page_address(bio_page(rq->bio)); 1164 unsigned long addr;
1165 unsigned long stack_mask = ~(THREAD_SIZE - 1);
1166
1167 if (rq->bio)
1168 addr = (unsigned long)bio_data(rq->bio);
1169 else
1170 addr = (unsigned long)rq->data;
1198 1171
1199 info->dma = drive->using_dma; 1172 info->dma = drive->using_dma;
1200 1173
@@ -1204,23 +1177,25 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
1204 * NOTE! The "len" and "addr" checks should possibly have 1177 * NOTE! The "len" and "addr" checks should possibly have
1205 * separate masks. 1178 * separate masks.
1206 */ 1179 */
1207 if ((rq->data_len & 15) || (addr & mask)) 1180 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
1181 if (addr & alignment || rq->data_len & alignment)
1208 info->dma = 0; 1182 info->dma = 0;
1209 }
1210 1183
1211 /* start sending the command to the drive */ 1184 if (!((addr & stack_mask) ^
1212 return cdrom_start_packet_command(drive, rq->data_len, 1185 ((unsigned long)current->stack & stack_mask)))
1213 cdrom_do_newpc_cont); 1186 info->dma = 0;
1187 }
1214} 1188}
1215 1189
1216/* 1190/*
1217 * cdrom driver request routine. 1191 * cdrom driver request routine.
1218 */ 1192 */
1219static ide_startstop_t ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq, 1193static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
1220 sector_t block) 1194 sector_t block)
1221{ 1195{
1222 ide_startstop_t action;
1223 struct cdrom_info *info = drive->driver_data; 1196 struct cdrom_info *info = drive->driver_data;
1197 ide_handler_t *fn;
1198 int xferlen;
1224 1199
1225 if (blk_fs_request(rq)) { 1200 if (blk_fs_request(rq)) {
1226 if (info->cd_flags & IDE_CD_FLAG_SEEKING) { 1201 if (info->cd_flags & IDE_CD_FLAG_SEEKING) {
@@ -1240,29 +1215,48 @@ static ide_startstop_t ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq,
1240 } 1215 }
1241 if (rq_data_dir(rq) == READ && 1216 if (rq_data_dir(rq) == READ &&
1242 IDE_LARGE_SEEK(info->last_block, block, 1217 IDE_LARGE_SEEK(info->last_block, block,
1243 IDECD_SEEK_THRESHOLD) && 1218 IDECD_SEEK_THRESHOLD) &&
1244 drive->dsc_overlap) 1219 drive->dsc_overlap) {
1245 action = cdrom_start_seek(drive, block); 1220 xferlen = 0;
1246 else 1221 fn = cdrom_start_seek_continuation;
1247 action = cdrom_start_rw(drive, rq); 1222
1223 info->dma = 0;
1224 info->start_seek = jiffies;
1225
1226 ide_cd_prepare_seek_request(drive, rq);
1227 } else {
1228 xferlen = 32768;
1229 fn = cdrom_start_rw_cont;
1230
1231 if (cdrom_start_rw(drive, rq) == ide_stopped)
1232 return ide_stopped;
1233
1234 if (ide_cd_prepare_rw_request(drive, rq) == ide_stopped)
1235 return ide_stopped;
1236 }
1248 info->last_block = block; 1237 info->last_block = block;
1249 return action;
1250 } else if (blk_sense_request(rq) || blk_pc_request(rq) || 1238 } else if (blk_sense_request(rq) || blk_pc_request(rq) ||
1251 rq->cmd_type == REQ_TYPE_ATA_PC) { 1239 rq->cmd_type == REQ_TYPE_ATA_PC) {
1252 return cdrom_do_block_pc(drive, rq); 1240 xferlen = rq->data_len;
1241 fn = cdrom_do_newpc_cont;
1242
1243 if (!rq->timeout)
1244 rq->timeout = ATAPI_WAIT_PC;
1245
1246 cdrom_do_block_pc(drive, rq);
1253 } else if (blk_special_request(rq)) { 1247 } else if (blk_special_request(rq)) {
1254 /* right now this can only be a reset... */ 1248 /* right now this can only be a reset... */
1255 cdrom_end_request(drive, 1); 1249 cdrom_end_request(drive, 1);
1256 return ide_stopped; 1250 return ide_stopped;
1251 } else {
1252 blk_dump_rq_flags(rq, "ide-cd bad flags");
1253 cdrom_end_request(drive, 0);
1254 return ide_stopped;
1257 } 1255 }
1258 1256
1259 blk_dump_rq_flags(rq, "ide-cd bad flags"); 1257 return cdrom_start_packet_command(drive, xferlen, fn);
1260 cdrom_end_request(drive, 0);
1261 return ide_stopped;
1262} 1258}
1263 1259
1264
1265
1266/* 1260/*
1267 * Ioctl handling. 1261 * Ioctl handling.
1268 * 1262 *
@@ -1872,6 +1866,7 @@ static int ide_cdrom_setup(ide_drive_t *drive)
1872 1866
1873 blk_queue_prep_rq(drive->queue, ide_cdrom_prep_fn); 1867 blk_queue_prep_rq(drive->queue, ide_cdrom_prep_fn);
1874 blk_queue_dma_alignment(drive->queue, 31); 1868 blk_queue_dma_alignment(drive->queue, 31);
1869 blk_queue_update_dma_pad(drive->queue, 15);
1875 drive->queue->unplug_delay = (1 * HZ) / 1000; 1870 drive->queue->unplug_delay = (1 * HZ) / 1000;
1876 if (!drive->queue->unplug_delay) 1871 if (!drive->queue->unplug_delay)
1877 drive->queue->unplug_delay = 1; 1872 drive->queue->unplug_delay = 1;
@@ -1954,10 +1949,9 @@ static ide_driver_t ide_cdrom_driver = {
1954 .version = IDECD_VERSION, 1949 .version = IDECD_VERSION,
1955 .media = ide_cdrom, 1950 .media = ide_cdrom,
1956 .supports_dsc_overlap = 1, 1951 .supports_dsc_overlap = 1,
1957 .do_request = ide_do_rw_cdrom, 1952 .do_request = ide_cd_do_request,
1958 .end_request = ide_end_request, 1953 .end_request = ide_end_request,
1959 .error = __ide_error, 1954 .error = __ide_error,
1960 .abort = __ide_abort,
1961#ifdef CONFIG_IDE_PROC_FS 1955#ifdef CONFIG_IDE_PROC_FS
1962 .proc = idecd_proc, 1956 .proc = idecd_proc,
1963#endif 1957#endif
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 5f49a4ae9dd8..3a2e80237c10 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -985,7 +985,6 @@ static ide_driver_t idedisk_driver = {
985 .do_request = ide_do_rw_disk, 985 .do_request = ide_do_rw_disk,
986 .end_request = ide_end_request, 986 .end_request = ide_end_request,
987 .error = __ide_error, 987 .error = __ide_error,
988 .abort = __ide_abort,
989#ifdef CONFIG_IDE_PROC_FS 988#ifdef CONFIG_IDE_PROC_FS
990 .proc = idedisk_proc, 989 .proc = idedisk_proc,
991#endif 990#endif
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index b3689437269f..011d72011cc4 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -351,10 +351,7 @@ static void ide_floppy_callback(ide_drive_t *drive)
351 351
352static void idefloppy_init_pc(struct ide_atapi_pc *pc) 352static void idefloppy_init_pc(struct ide_atapi_pc *pc)
353{ 353{
354 memset(pc->c, 0, 12); 354 memset(pc, 0, sizeof(*pc));
355 pc->retries = 0;
356 pc->flags = 0;
357 pc->req_xfer = 0;
358 pc->buf = pc->pc_buf; 355 pc->buf = pc->pc_buf;
359 pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE; 356 pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE;
360 pc->callback = ide_floppy_callback; 357 pc->callback = ide_floppy_callback;
@@ -561,12 +558,6 @@ static void idefloppy_create_start_stop_cmd(struct ide_atapi_pc *pc, int start)
561 pc->c[4] = start; 558 pc->c[4] = start;
562} 559}
563 560
564static void idefloppy_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
565{
566 idefloppy_init_pc(pc);
567 pc->c[0] = GPCMD_TEST_UNIT_READY;
568}
569
570static void idefloppy_create_rw_cmd(idefloppy_floppy_t *floppy, 561static void idefloppy_create_rw_cmd(idefloppy_floppy_t *floppy,
571 struct ide_atapi_pc *pc, struct request *rq, 562 struct ide_atapi_pc *pc, struct request *rq,
572 unsigned long sector) 563 unsigned long sector)
@@ -711,10 +702,10 @@ static int ide_floppy_get_flexible_disk_page(ide_drive_t *drive)
711 set_disk_ro(floppy->disk, floppy->wp); 702 set_disk_ro(floppy->disk, floppy->wp);
712 page = &pc.buf[8]; 703 page = &pc.buf[8];
713 704
714 transfer_rate = be16_to_cpu(*(u16 *)&pc.buf[8 + 2]); 705 transfer_rate = be16_to_cpup((__be16 *)&pc.buf[8 + 2]);
715 sector_size = be16_to_cpu(*(u16 *)&pc.buf[8 + 6]); 706 sector_size = be16_to_cpup((__be16 *)&pc.buf[8 + 6]);
716 cyls = be16_to_cpu(*(u16 *)&pc.buf[8 + 8]); 707 cyls = be16_to_cpup((__be16 *)&pc.buf[8 + 8]);
717 rpm = be16_to_cpu(*(u16 *)&pc.buf[8 + 28]); 708 rpm = be16_to_cpup((__be16 *)&pc.buf[8 + 28]);
718 heads = pc.buf[8 + 4]; 709 heads = pc.buf[8 + 4];
719 sectors = pc.buf[8 + 5]; 710 sectors = pc.buf[8 + 5];
720 711
@@ -789,8 +780,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
789 for (i = 0; i < desc_cnt; i++) { 780 for (i = 0; i < desc_cnt; i++) {
790 unsigned int desc_start = 4 + i*8; 781 unsigned int desc_start = 4 + i*8;
791 782
792 blocks = be32_to_cpu(*(u32 *)&pc.buf[desc_start]); 783 blocks = be32_to_cpup((__be32 *)&pc.buf[desc_start]);
793 length = be16_to_cpu(*(u16 *)&pc.buf[desc_start + 6]); 784 length = be16_to_cpup((__be16 *)&pc.buf[desc_start + 6]);
794 785
795 debug_log("Descriptor %d: %dkB, %d blocks, %d sector size\n", 786 debug_log("Descriptor %d: %dkB, %d blocks, %d sector size\n",
796 i, blocks * length / 1024, blocks, length); 787 i, blocks * length / 1024, blocks, length);
@@ -911,8 +902,8 @@ static int ide_floppy_get_format_capacities(ide_drive_t *drive, int __user *arg)
911 if (u_index >= u_array_size) 902 if (u_index >= u_array_size)
912 break; /* User-supplied buffer too small */ 903 break; /* User-supplied buffer too small */
913 904
914 blocks = be32_to_cpu(*(u32 *)&pc.buf[desc_start]); 905 blocks = be32_to_cpup((__be32 *)&pc.buf[desc_start]);
915 length = be16_to_cpu(*(u16 *)&pc.buf[desc_start + 6]); 906 length = be16_to_cpup((__be16 *)&pc.buf[desc_start + 6]);
916 907
917 if (put_user(blocks, argp)) 908 if (put_user(blocks, argp))
918 return(-EFAULT); 909 return(-EFAULT);
@@ -1138,7 +1129,6 @@ static ide_driver_t idefloppy_driver = {
1138 .do_request = idefloppy_do_request, 1129 .do_request = idefloppy_do_request,
1139 .end_request = idefloppy_end_request, 1130 .end_request = idefloppy_end_request,
1140 .error = __ide_error, 1131 .error = __ide_error,
1141 .abort = __ide_abort,
1142#ifdef CONFIG_IDE_PROC_FS 1132#ifdef CONFIG_IDE_PROC_FS
1143 .proc = idefloppy_proc, 1133 .proc = idefloppy_proc,
1144#endif 1134#endif
@@ -1166,7 +1156,9 @@ static int idefloppy_open(struct inode *inode, struct file *filp)
1166 floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1156 floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS;
1167 /* Just in case */ 1157 /* Just in case */
1168 1158
1169 idefloppy_create_test_unit_ready_cmd(&pc); 1159 idefloppy_init_pc(&pc);
1160 pc.c[0] = GPCMD_TEST_UNIT_READY;
1161
1170 if (idefloppy_queue_pc_tail(drive, &pc)) { 1162 if (idefloppy_queue_pc_tail(drive, &pc)) {
1171 idefloppy_create_start_stop_cmd(&pc, 1); 1163 idefloppy_create_start_stop_cmd(&pc, 1);
1172 (void) idefloppy_queue_pc_tail(drive, &pc); 1164 (void) idefloppy_queue_pc_tail(drive, &pc);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 28057747c1f8..661b75a89d4d 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -504,55 +504,6 @@ ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
504 504
505EXPORT_SYMBOL_GPL(ide_error); 505EXPORT_SYMBOL_GPL(ide_error);
506 506
507ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq)
508{
509 if (drive->media != ide_disk)
510 rq->errors |= ERROR_RESET;
511
512 ide_kill_rq(drive, rq);
513
514 return ide_stopped;
515}
516
517EXPORT_SYMBOL_GPL(__ide_abort);
518
519/**
520 * ide_abort - abort pending IDE operations
521 * @drive: drive the error occurred on
522 * @msg: message to report
523 *
524 * ide_abort kills and cleans up when we are about to do a
525 * host initiated reset on active commands. Longer term we
526 * want handlers to have sensible abort handling themselves
527 *
528 * This differs fundamentally from ide_error because in
529 * this case the command is doing just fine when we
530 * blow it away.
531 */
532
533ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
534{
535 struct request *rq;
536
537 if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
538 return ide_stopped;
539
540 /* retry only "normal" I/O: */
541 if (!blk_fs_request(rq)) {
542 rq->errors = 1;
543 ide_end_drive_cmd(drive, BUSY_STAT, 0);
544 return ide_stopped;
545 }
546
547 if (rq->rq_disk) {
548 ide_driver_t *drv;
549
550 drv = *(ide_driver_t **)rq->rq_disk->private_data;
551 return drv->abort(drive, rq);
552 } else
553 return __ide_abort(drive, rq);
554}
555
556static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) 507static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
557{ 508{
558 tf->nsect = drive->sect; 509 tf->nsect = drive->sect;
@@ -766,6 +717,18 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
766 return ide_stopped; 717 return ide_stopped;
767} 718}
768 719
720static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
721{
722 switch (rq->cmd[0]) {
723 case REQ_DRIVE_RESET:
724 return ide_do_reset(drive);
725 default:
726 blk_dump_rq_flags(rq, "ide_special_rq - bad request");
727 ide_end_request(drive, 0, 0);
728 return ide_stopped;
729 }
730}
731
769static void ide_check_pm_state(ide_drive_t *drive, struct request *rq) 732static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
770{ 733{
771 struct request_pm_state *pm = rq->data; 734 struct request_pm_state *pm = rq->data;
@@ -869,7 +832,16 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
869 pm->pm_step == ide_pm_state_completed) 832 pm->pm_step == ide_pm_state_completed)
870 ide_complete_pm_request(drive, rq); 833 ide_complete_pm_request(drive, rq);
871 return startstop; 834 return startstop;
872 } 835 } else if (!rq->rq_disk && blk_special_request(rq))
836 /*
837 * TODO: Once all ULDs have been modified to
838 * check for specific op codes rather than
839 * blindly accepting any special request, the
840 * check for ->rq_disk above may be replaced
841 * by a more suitable mechanism or even
842 * dropped entirely.
843 */
844 return ide_special_rq(drive, rq);
873 845
874 drv = *(ide_driver_t **)rq->rq_disk->private_data; 846 drv = *(ide_driver_t **)rq->rq_disk->private_data;
875 return drv->do_request(drive, rq, block); 847 return drv->do_request(drive, rq, block);
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 80ad4f234f3f..44aaec256a30 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -905,6 +905,14 @@ void ide_execute_pkt_cmd(ide_drive_t *drive)
905} 905}
906EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd); 906EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd);
907 907
908static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
909{
910 struct request *rq = drive->hwif->hwgroup->rq;
911
912 if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET)
913 ide_end_request(drive, err ? err : 1, 0);
914}
915
908/* needed below */ 916/* needed below */
909static ide_startstop_t do_reset1 (ide_drive_t *, int); 917static ide_startstop_t do_reset1 (ide_drive_t *, int);
910 918
@@ -940,7 +948,7 @@ static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
940 } 948 }
941 /* done polling */ 949 /* done polling */
942 hwgroup->polling = 0; 950 hwgroup->polling = 0;
943 hwgroup->resetting = 0; 951 ide_complete_drive_reset(drive, 0);
944 return ide_stopped; 952 return ide_stopped;
945} 953}
946 954
@@ -956,12 +964,14 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
956 ide_hwif_t *hwif = HWIF(drive); 964 ide_hwif_t *hwif = HWIF(drive);
957 const struct ide_port_ops *port_ops = hwif->port_ops; 965 const struct ide_port_ops *port_ops = hwif->port_ops;
958 u8 tmp; 966 u8 tmp;
967 int err = 0;
959 968
960 if (port_ops && port_ops->reset_poll) { 969 if (port_ops && port_ops->reset_poll) {
961 if (port_ops->reset_poll(drive)) { 970 err = port_ops->reset_poll(drive);
971 if (err) {
962 printk(KERN_ERR "%s: host reset_poll failure for %s.\n", 972 printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
963 hwif->name, drive->name); 973 hwif->name, drive->name);
964 return ide_stopped; 974 goto out;
965 } 975 }
966 } 976 }
967 977
@@ -975,6 +985,7 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
975 } 985 }
976 printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp); 986 printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp);
977 drive->failures++; 987 drive->failures++;
988 err = -EIO;
978 } else { 989 } else {
979 printk("%s: reset: ", hwif->name); 990 printk("%s: reset: ", hwif->name);
980 tmp = ide_read_error(drive); 991 tmp = ide_read_error(drive);
@@ -1001,10 +1012,12 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
1001 if (tmp & 0x80) 1012 if (tmp & 0x80)
1002 printk("; slave: failed"); 1013 printk("; slave: failed");
1003 printk("\n"); 1014 printk("\n");
1015 err = -EIO;
1004 } 1016 }
1005 } 1017 }
1018out:
1006 hwgroup->polling = 0; /* done polling */ 1019 hwgroup->polling = 0; /* done polling */
1007 hwgroup->resetting = 0; /* done reset attempt */ 1020 ide_complete_drive_reset(drive, err);
1008 return ide_stopped; 1021 return ide_stopped;
1009} 1022}
1010 1023
@@ -1090,7 +1103,6 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1090 1103
1091 /* For an ATAPI device, first try an ATAPI SRST. */ 1104 /* For an ATAPI device, first try an ATAPI SRST. */
1092 if (drive->media != ide_disk && !do_not_try_atapi) { 1105 if (drive->media != ide_disk && !do_not_try_atapi) {
1093 hwgroup->resetting = 1;
1094 pre_reset(drive); 1106 pre_reset(drive);
1095 SELECT_DRIVE(drive); 1107 SELECT_DRIVE(drive);
1096 udelay (20); 1108 udelay (20);
@@ -1112,10 +1124,10 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1112 1124
1113 if (io_ports->ctl_addr == 0) { 1125 if (io_ports->ctl_addr == 0) {
1114 spin_unlock_irqrestore(&ide_lock, flags); 1126 spin_unlock_irqrestore(&ide_lock, flags);
1127 ide_complete_drive_reset(drive, -ENXIO);
1115 return ide_stopped; 1128 return ide_stopped;
1116 } 1129 }
1117 1130
1118 hwgroup->resetting = 1;
1119 /* 1131 /*
1120 * Note that we also set nIEN while resetting the device, 1132 * Note that we also set nIEN while resetting the device,
1121 * to mask unwanted interrupts from the interface during the reset. 1133 * to mask unwanted interrupts from the interface during the reset.
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 47af80df6872..13af72f09ec4 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -1,26 +1,11 @@
1#include <linux/module.h>
2#include <linux/types.h> 1#include <linux/types.h>
3#include <linux/string.h> 2#include <linux/string.h>
4#include <linux/kernel.h> 3#include <linux/kernel.h>
5#include <linux/timer.h>
6#include <linux/mm.h>
7#include <linux/interrupt.h> 4#include <linux/interrupt.h>
8#include <linux/major.h>
9#include <linux/errno.h>
10#include <linux/genhd.h>
11#include <linux/blkpg.h>
12#include <linux/slab.h>
13#include <linux/pci.h>
14#include <linux/delay.h>
15#include <linux/hdreg.h> 5#include <linux/hdreg.h>
16#include <linux/ide.h> 6#include <linux/ide.h>
17#include <linux/bitops.h> 7#include <linux/bitops.h>
18 8
19#include <asm/byteorder.h>
20#include <asm/irq.h>
21#include <asm/uaccess.h>
22#include <asm/io.h>
23
24static const char *udma_str[] = 9static const char *udma_str[] =
25 { "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44", 10 { "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
26 "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" }; 11 "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" };
@@ -90,142 +75,6 @@ static u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
90 return min(speed, mode); 75 return min(speed, mode);
91} 76}
92 77
93/*
94 * Standard (generic) timings for PIO modes, from ATA2 specification.
95 * These timings are for access to the IDE data port register *only*.
96 * Some drives may specify a mode, while also specifying a different
97 * value for cycle_time (from drive identification data).
98 */
99const ide_pio_timings_t ide_pio_timings[6] = {
100 { 70, 165, 600 }, /* PIO Mode 0 */
101 { 50, 125, 383 }, /* PIO Mode 1 */
102 { 30, 100, 240 }, /* PIO Mode 2 */
103 { 30, 80, 180 }, /* PIO Mode 3 with IORDY */
104 { 25, 70, 120 }, /* PIO Mode 4 with IORDY */
105 { 20, 50, 100 } /* PIO Mode 5 with IORDY (nonstandard) */
106};
107
108EXPORT_SYMBOL_GPL(ide_pio_timings);
109
110/*
111 * Shared data/functions for determining best PIO mode for an IDE drive.
112 * Most of this stuff originally lived in cmd640.c, and changes to the
113 * ide_pio_blacklist[] table should be made with EXTREME CAUTION to avoid
114 * breaking the fragile cmd640.c support.
115 */
116
117/*
118 * Black list. Some drives incorrectly report their maximal PIO mode,
119 * at least in respect to CMD640. Here we keep info on some known drives.
120 */
121static struct ide_pio_info {
122 const char *name;
123 int pio;
124} ide_pio_blacklist [] = {
125 { "Conner Peripherals 540MB - CFS540A", 3 },
126
127 { "WDC AC2700", 3 },
128 { "WDC AC2540", 3 },
129 { "WDC AC2420", 3 },
130 { "WDC AC2340", 3 },
131 { "WDC AC2250", 0 },
132 { "WDC AC2200", 0 },
133 { "WDC AC21200", 4 },
134 { "WDC AC2120", 0 },
135 { "WDC AC2850", 3 },
136 { "WDC AC1270", 3 },
137 { "WDC AC1170", 1 },
138 { "WDC AC1210", 1 },
139 { "WDC AC280", 0 },
140 { "WDC AC31000", 3 },
141 { "WDC AC31200", 3 },
142
143 { "Maxtor 7131 AT", 1 },
144 { "Maxtor 7171 AT", 1 },
145 { "Maxtor 7213 AT", 1 },
146 { "Maxtor 7245 AT", 1 },
147 { "Maxtor 7345 AT", 1 },
148 { "Maxtor 7546 AT", 3 },
149 { "Maxtor 7540 AV", 3 },
150
151 { "SAMSUNG SHD-3121A", 1 },
152 { "SAMSUNG SHD-3122A", 1 },
153 { "SAMSUNG SHD-3172A", 1 },
154
155 { "ST5660A", 3 },
156 { "ST3660A", 3 },
157 { "ST3630A", 3 },
158 { "ST3655A", 3 },
159 { "ST3391A", 3 },
160 { "ST3390A", 1 },
161 { "ST3600A", 1 },
162 { "ST3290A", 0 },
163 { "ST3144A", 0 },
164 { "ST3491A", 1 }, /* reports 3, should be 1 or 2 (depending on */
165 /* drive) according to Seagates FIND-ATA program */
166
167 { "QUANTUM ELS127A", 0 },
168 { "QUANTUM ELS170A", 0 },
169 { "QUANTUM LPS240A", 0 },
170 { "QUANTUM LPS210A", 3 },
171 { "QUANTUM LPS270A", 3 },
172 { "QUANTUM LPS365A", 3 },
173 { "QUANTUM LPS540A", 3 },
174 { "QUANTUM LIGHTNING 540A", 3 },
175 { "QUANTUM LIGHTNING 730A", 3 },
176
177 { "QUANTUM FIREBALL_540", 3 }, /* Older Quantum Fireballs don't work */
178 { "QUANTUM FIREBALL_640", 3 },
179 { "QUANTUM FIREBALL_1080", 3 },
180 { "QUANTUM FIREBALL_1280", 3 },
181 { NULL, 0 }
182};
183
184/**
185 * ide_scan_pio_blacklist - check for a blacklisted drive
186 * @model: Drive model string
187 *
188 * This routine searches the ide_pio_blacklist for an entry
189 * matching the start/whole of the supplied model name.
190 *
191 * Returns -1 if no match found.
192 * Otherwise returns the recommended PIO mode from ide_pio_blacklist[].
193 */
194
195static int ide_scan_pio_blacklist (char *model)
196{
197 struct ide_pio_info *p;
198
199 for (p = ide_pio_blacklist; p->name != NULL; p++) {
200 if (strncmp(p->name, model, strlen(p->name)) == 0)
201 return p->pio;
202 }
203 return -1;
204}
205
206unsigned int ide_pio_cycle_time(ide_drive_t *drive, u8 pio)
207{
208 struct hd_driveid *id = drive->id;
209 int cycle_time = 0;
210
211 if (id->field_valid & 2) {
212 if (id->capability & 8)
213 cycle_time = id->eide_pio_iordy;
214 else
215 cycle_time = id->eide_pio;
216 }
217
218 /* conservative "downgrade" for all pre-ATA2 drives */
219 if (pio < 3) {
220 if (cycle_time && cycle_time < ide_pio_timings[pio].cycle_time)
221 cycle_time = 0; /* use standard timing */
222 }
223
224 return cycle_time ? cycle_time : ide_pio_timings[pio].cycle_time;
225}
226
227EXPORT_SYMBOL_GPL(ide_pio_cycle_time);
228
229/** 78/**
230 * ide_get_best_pio_mode - get PIO mode from drive 79 * ide_get_best_pio_mode - get PIO mode from drive
231 * @drive: drive to consider 80 * @drive: drive to consider
diff --git a/drivers/ide/ide-pio-blacklist.c b/drivers/ide/ide-pio-blacklist.c
new file mode 100644
index 000000000000..a8c2c8f8660a
--- /dev/null
+++ b/drivers/ide/ide-pio-blacklist.c
@@ -0,0 +1,94 @@
1/*
2 * PIO blacklist. Some drives incorrectly report their maximal PIO mode,
3 * at least in respect to CMD640. Here we keep info on some known drives.
4 *
5 * Changes to the ide_pio_blacklist[] should be made with EXTREME CAUTION
6 * to avoid breaking the fragile cmd640.c support.
7 */
8
9#include <linux/string.h>
10
11static struct ide_pio_info {
12 const char *name;
13 int pio;
14} ide_pio_blacklist [] = {
15 { "Conner Peripherals 540MB - CFS540A", 3 },
16
17 { "WDC AC2700", 3 },
18 { "WDC AC2540", 3 },
19 { "WDC AC2420", 3 },
20 { "WDC AC2340", 3 },
21 { "WDC AC2250", 0 },
22 { "WDC AC2200", 0 },
23 { "WDC AC21200", 4 },
24 { "WDC AC2120", 0 },
25 { "WDC AC2850", 3 },
26 { "WDC AC1270", 3 },
27 { "WDC AC1170", 1 },
28 { "WDC AC1210", 1 },
29 { "WDC AC280", 0 },
30 { "WDC AC31000", 3 },
31 { "WDC AC31200", 3 },
32
33 { "Maxtor 7131 AT", 1 },
34 { "Maxtor 7171 AT", 1 },
35 { "Maxtor 7213 AT", 1 },
36 { "Maxtor 7245 AT", 1 },
37 { "Maxtor 7345 AT", 1 },
38 { "Maxtor 7546 AT", 3 },
39 { "Maxtor 7540 AV", 3 },
40
41 { "SAMSUNG SHD-3121A", 1 },
42 { "SAMSUNG SHD-3122A", 1 },
43 { "SAMSUNG SHD-3172A", 1 },
44
45 { "ST5660A", 3 },
46 { "ST3660A", 3 },
47 { "ST3630A", 3 },
48 { "ST3655A", 3 },
49 { "ST3391A", 3 },
50 { "ST3390A", 1 },
51 { "ST3600A", 1 },
52 { "ST3290A", 0 },
53 { "ST3144A", 0 },
54 { "ST3491A", 1 }, /* reports 3, should be 1 or 2 (depending on drive)
55 according to Seagate's FIND-ATA program */
56
57 { "QUANTUM ELS127A", 0 },
58 { "QUANTUM ELS170A", 0 },
59 { "QUANTUM LPS240A", 0 },
60 { "QUANTUM LPS210A", 3 },
61 { "QUANTUM LPS270A", 3 },
62 { "QUANTUM LPS365A", 3 },
63 { "QUANTUM LPS540A", 3 },
64 { "QUANTUM LIGHTNING 540A", 3 },
65 { "QUANTUM LIGHTNING 730A", 3 },
66
67 { "QUANTUM FIREBALL_540", 3 }, /* Older Quantum Fireballs don't work */
68 { "QUANTUM FIREBALL_640", 3 },
69 { "QUANTUM FIREBALL_1080", 3 },
70 { "QUANTUM FIREBALL_1280", 3 },
71 { NULL, 0 }
72};
73
74/**
75 * ide_scan_pio_blacklist - check for a blacklisted drive
76 * @model: Drive model string
77 *
78 * This routine searches the ide_pio_blacklist for an entry
79 * matching the start/whole of the supplied model name.
80 *
81 * Returns -1 if no match found.
82 * Otherwise returns the recommended PIO mode from ide_pio_blacklist[].
83 */
84
85int ide_scan_pio_blacklist(char *model)
86{
87 struct ide_pio_info *p;
88
89 for (p = ide_pio_blacklist; p->name != NULL; p++) {
90 if (strncmp(p->name, model, strlen(p->name)) == 0)
91 return p->pio;
92 }
93 return -1;
94}
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index adbd01784162..03f2ef5470a3 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -33,6 +33,8 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
33 ide_hwif_t *hwif; 33 ide_hwif_t *hwif;
34 unsigned long base, ctl; 34 unsigned long base, ctl;
35 35
36 printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");
37
36 if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0))) 38 if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0)))
37 return -1; 39 return -1;
38 40
@@ -62,10 +64,8 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
62 u8 index = hwif->index; 64 u8 index = hwif->index;
63 u8 idx[4] = { index, 0xff, 0xff, 0xff }; 65 u8 idx[4] = { index, 0xff, 0xff, 0xff };
64 66
65 ide_init_port_data(hwif, index);
66 ide_init_port_hw(hwif, &hw); 67 ide_init_port_hw(hwif, &hw);
67 68
68 printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index);
69 pnp_set_drvdata(dev, hwif); 69 pnp_set_drvdata(dev, hwif);
70 70
71 ide_device_add(idx, NULL); 71 ide_device_add(idx, NULL);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index d21e51a02c3e..235ebdb29b28 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -39,6 +39,8 @@
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42static ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
43
42/** 44/**
43 * generic_id - add a generic drive id 45 * generic_id - add a generic drive id
44 * @drive: drive to make an ID block for 46 * @drive: drive to make an ID block for
@@ -1318,10 +1320,10 @@ static void ide_port_init_devices(ide_hwif_t *hwif)
1318 drive->unmask = 1; 1320 drive->unmask = 1;
1319 if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS) 1321 if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
1320 drive->no_unmask = 1; 1322 drive->no_unmask = 1;
1321 }
1322 1323
1323 if (port_ops && port_ops->port_init_devs) 1324 if (port_ops && port_ops->init_dev)
1324 port_ops->port_init_devs(hwif); 1325 port_ops->init_dev(drive);
1326 }
1325} 1327}
1326 1328
1327static void ide_init_port(ide_hwif_t *hwif, unsigned int port, 1329static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
@@ -1473,22 +1475,29 @@ ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d)
1473 for (; i < MAX_HWIFS; i++) { 1475 for (; i < MAX_HWIFS; i++) {
1474 hwif = &ide_hwifs[i]; 1476 hwif = &ide_hwifs[i];
1475 if (hwif->chipset == ide_unknown) 1477 if (hwif->chipset == ide_unknown)
1476 return hwif; 1478 goto out_found;
1477 } 1479 }
1478 } else { 1480 } else {
1479 for (i = 2; i < MAX_HWIFS; i++) { 1481 for (i = 2; i < MAX_HWIFS; i++) {
1480 hwif = &ide_hwifs[i]; 1482 hwif = &ide_hwifs[i];
1481 if (hwif->chipset == ide_unknown) 1483 if (hwif->chipset == ide_unknown)
1482 return hwif; 1484 goto out_found;
1483 } 1485 }
1484 for (i = 0; i < 2 && i < MAX_HWIFS; i++) { 1486 for (i = 0; i < 2 && i < MAX_HWIFS; i++) {
1485 hwif = &ide_hwifs[i]; 1487 hwif = &ide_hwifs[i];
1486 if (hwif->chipset == ide_unknown) 1488 if (hwif->chipset == ide_unknown)
1487 return hwif; 1489 goto out_found;
1488 } 1490 }
1489 } 1491 }
1490 1492
1493 printk(KERN_ERR "%s: no free slot for interface\n",
1494 d ? d->name : "ide");
1495
1491 return NULL; 1496 return NULL;
1497
1498out_found:
1499 ide_init_port_data(hwif, i);
1500 return hwif;
1492} 1501}
1493EXPORT_SYMBOL_GPL(ide_find_port_slot); 1502EXPORT_SYMBOL_GPL(ide_find_port_slot);
1494 1503
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index f9cf1670e4e1..b711ab96e287 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -2591,7 +2591,6 @@ static ide_driver_t idetape_driver = {
2591 .do_request = idetape_do_request, 2591 .do_request = idetape_do_request,
2592 .end_request = idetape_end_request, 2592 .end_request = idetape_end_request,
2593 .error = __ide_error, 2593 .error = __ide_error,
2594 .abort = __ide_abort,
2595#ifdef CONFIG_IDE_PROC_FS 2594#ifdef CONFIG_IDE_PROC_FS
2596 .proc = idetape_proc, 2595 .proc = idetape_proc,
2597#endif 2596#endif
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index cf55a48a7dd2..1fbdb746dc88 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -8,28 +8,18 @@
8 * The big the bad and the ugly. 8 * The big the bad and the ugly.
9 */ 9 */
10 10
11#include <linux/module.h>
12#include <linux/types.h> 11#include <linux/types.h>
13#include <linux/string.h> 12#include <linux/string.h>
14#include <linux/kernel.h> 13#include <linux/kernel.h>
15#include <linux/timer.h>
16#include <linux/mm.h>
17#include <linux/sched.h> 14#include <linux/sched.h>
18#include <linux/interrupt.h> 15#include <linux/interrupt.h>
19#include <linux/major.h>
20#include <linux/errno.h> 16#include <linux/errno.h>
21#include <linux/genhd.h>
22#include <linux/blkpg.h>
23#include <linux/slab.h> 17#include <linux/slab.h>
24#include <linux/pci.h>
25#include <linux/delay.h> 18#include <linux/delay.h>
26#include <linux/hdreg.h> 19#include <linux/hdreg.h>
27#include <linux/ide.h> 20#include <linux/ide.h>
28#include <linux/bitops.h>
29#include <linux/scatterlist.h> 21#include <linux/scatterlist.h>
30 22
31#include <asm/byteorder.h>
32#include <asm/irq.h>
33#include <asm/uaccess.h> 23#include <asm/uaccess.h>
34#include <asm/io.h> 24#include <asm/io.h>
35 25
@@ -62,25 +52,6 @@ int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
62 return ide_raw_taskfile(drive, &args, buf, 1); 52 return ide_raw_taskfile(drive, &args, buf, 1);
63} 53}
64 54
65static int inline task_dma_ok(ide_task_t *task)
66{
67 if (blk_fs_request(task->rq) || (task->tf_flags & IDE_TFLAG_FLAGGED))
68 return 1;
69
70 switch (task->tf.command) {
71 case WIN_WRITEDMA_ONCE:
72 case WIN_WRITEDMA:
73 case WIN_WRITEDMA_EXT:
74 case WIN_READDMA_ONCE:
75 case WIN_READDMA:
76 case WIN_READDMA_EXT:
77 case WIN_IDENTIFY_DMA:
78 return 1;
79 }
80
81 return 0;
82}
83
84static ide_startstop_t task_no_data_intr(ide_drive_t *); 55static ide_startstop_t task_no_data_intr(ide_drive_t *);
85static ide_startstop_t set_geometry_intr(ide_drive_t *); 56static ide_startstop_t set_geometry_intr(ide_drive_t *);
86static ide_startstop_t recal_intr(ide_drive_t *); 57static ide_startstop_t recal_intr(ide_drive_t *);
@@ -139,8 +110,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
139 WAIT_WORSTCASE, NULL); 110 WAIT_WORSTCASE, NULL);
140 return ide_started; 111 return ide_started;
141 default: 112 default:
142 if (task_dma_ok(task) == 0 || drive->using_dma == 0 || 113 if (drive->using_dma == 0 || dma_ops->dma_setup(drive))
143 dma_ops->dma_setup(drive))
144 return ide_stopped; 114 return ide_stopped;
145 dma_ops->dma_exec_cmd(drive, tf->command); 115 dma_ops->dma_exec_cmd(drive, tf->command);
146 dma_ops->dma_start(drive); 116 dma_ops->dma_start(drive);
@@ -183,7 +153,6 @@ static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
183 if (stat & (ERR_STAT|DRQ_STAT)) 153 if (stat & (ERR_STAT|DRQ_STAT))
184 return ide_error(drive, "set_geometry_intr", stat); 154 return ide_error(drive, "set_geometry_intr", stat);
185 155
186 BUG_ON(HWGROUP(drive)->handler != NULL);
187 ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL); 156 ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
188 return ide_started; 157 return ide_started;
189} 158}
diff --git a/drivers/ide/ide-timing.h b/drivers/ide/ide-timings.c
index 2e91c5870b4c..8c2f8327f487 100644
--- a/drivers/ide/ide-timing.h
+++ b/drivers/ide/ide-timings.c
@@ -1,11 +1,7 @@
1#ifndef _IDE_TIMING_H
2#define _IDE_TIMING_H
3
4/* 1/*
5 * Copyright (c) 1999-2001 Vojtech Pavlik 2 * Copyright (c) 1999-2001 Vojtech Pavlik
6 */ 3 * Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz
7 4 *
8/*
9 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or 7 * the Free Software Foundation; either version 2 of the License, or
@@ -27,27 +23,14 @@
27 23
28#include <linux/kernel.h> 24#include <linux/kernel.h>
29#include <linux/hdreg.h> 25#include <linux/hdreg.h>
30 26#include <linux/ide.h>
31#define XFER_PIO_5 0x0d 27#include <linux/module.h>
32#define XFER_UDMA_SLOW 0x4f
33
34struct ide_timing {
35 short mode;
36 short setup; /* t1 */
37 short act8b; /* t2 for 8-bit io */
38 short rec8b; /* t2i for 8-bit io */
39 short cyc8b; /* t0 for 8-bit io */
40 short active; /* t2 or tD */
41 short recover; /* t2i or tK */
42 short cycle; /* t0 */
43 short udma; /* t2CYCTYP/2 */
44};
45 28
46/* 29/*
47 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 30 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
48 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 31 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
49 * for PIO 5, which is a nonstandard extension and UDMA6, which 32 * for PIO 5, which is a nonstandard extension and UDMA6, which
50 * is currently supported only by Maxtor drives. 33 * is currently supported only by Maxtor drives.
51 */ 34 */
52 35
53static struct ide_timing ide_timing[] = { 36static struct ide_timing ide_timing[] = {
@@ -61,12 +44,10 @@ static struct ide_timing ide_timing[] = {
61 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, 44 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
62 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, 45 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
63 46
64 { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 },
65
66 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, 47 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
67 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, 48 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
68 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, 49 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
69 50
70 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, 51 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
71 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, 52 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
72 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, 53 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
@@ -81,29 +62,46 @@ static struct ide_timing ide_timing[] = {
81 62
82 { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, 63 { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 },
83 64
84 { -1 } 65 { 0xff }
85}; 66};
86 67
87#define IDE_TIMING_SETUP 0x01 68struct ide_timing *ide_timing_find_mode(u8 speed)
88#define IDE_TIMING_ACT8B 0x02 69{
89#define IDE_TIMING_REC8B 0x04 70 struct ide_timing *t;
90#define IDE_TIMING_CYC8B 0x08 71
91#define IDE_TIMING_8BIT 0x0e 72 for (t = ide_timing; t->mode != speed; t++)
92#define IDE_TIMING_ACTIVE 0x10 73 if (t->mode == 0xff)
93#define IDE_TIMING_RECOVER 0x20 74 return NULL;
94#define IDE_TIMING_CYCLE 0x40 75 return t;
95#define IDE_TIMING_UDMA 0x80 76}
96#define IDE_TIMING_ALL 0xff 77EXPORT_SYMBOL_GPL(ide_timing_find_mode);
97 78
98#define ENOUGH(v,unit) (((v)-1)/(unit)+1) 79u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio)
99#define EZ(v,unit) ((v)?ENOUGH(v,unit):0) 80{
100 81 struct hd_driveid *id = drive->id;
101#define XFER_MODE 0xf0 82 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
102#define XFER_MWDMA 0x20 83 u16 cycle = 0;
103#define XFER_EPIO 0x01 84
104#define XFER_PIO 0x00 85 if (id->field_valid & 2) {
105 86 if (id->capability & 8)
106static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, int T, int UT) 87 cycle = id->eide_pio_iordy;
88 else
89 cycle = id->eide_pio;
90
91 /* conservative "downgrade" for all pre-ATA2 drives */
92 if (pio < 3 && cycle < t->cycle)
93 cycle = 0; /* use standard timing */
94 }
95
96 return cycle ? cycle : t->cycle;
97}
98EXPORT_SYMBOL_GPL(ide_pio_cycle_time);
99
100#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1)
101#define EZ(v, unit) ((v) ? ENOUGH(v, unit) : 0)
102
103static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q,
104 int T, int UT)
107{ 105{
108 q->setup = EZ(t->setup * 1000, T); 106 q->setup = EZ(t->setup * 1000, T);
109 q->act8b = EZ(t->act8b * 1000, T); 107 q->act8b = EZ(t->act8b * 1000, T);
@@ -115,92 +113,83 @@ static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, int
115 q->udma = EZ(t->udma * 1000, UT); 113 q->udma = EZ(t->udma * 1000, UT);
116} 114}
117 115
118static void ide_timing_merge(struct ide_timing *a, struct ide_timing *b, struct ide_timing *m, unsigned int what) 116void ide_timing_merge(struct ide_timing *a, struct ide_timing *b,
119{ 117 struct ide_timing *m, unsigned int what)
120 if (what & IDE_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
121 if (what & IDE_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
122 if (what & IDE_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
123 if (what & IDE_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
124 if (what & IDE_TIMING_ACTIVE ) m->active = max(a->active, b->active);
125 if (what & IDE_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
126 if (what & IDE_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
127 if (what & IDE_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
128}
129
130static struct ide_timing* ide_timing_find_mode(short speed)
131{ 118{
132 struct ide_timing *t; 119 if (what & IDE_TIMING_SETUP)
133 120 m->setup = max(a->setup, b->setup);
134 for (t = ide_timing; t->mode != speed; t++) 121 if (what & IDE_TIMING_ACT8B)
135 if (t->mode < 0) 122 m->act8b = max(a->act8b, b->act8b);
136 return NULL; 123 if (what & IDE_TIMING_REC8B)
137 return t; 124 m->rec8b = max(a->rec8b, b->rec8b);
125 if (what & IDE_TIMING_CYC8B)
126 m->cyc8b = max(a->cyc8b, b->cyc8b);
127 if (what & IDE_TIMING_ACTIVE)
128 m->active = max(a->active, b->active);
129 if (what & IDE_TIMING_RECOVER)
130 m->recover = max(a->recover, b->recover);
131 if (what & IDE_TIMING_CYCLE)
132 m->cycle = max(a->cycle, b->cycle);
133 if (what & IDE_TIMING_UDMA)
134 m->udma = max(a->udma, b->udma);
138} 135}
136EXPORT_SYMBOL_GPL(ide_timing_merge);
139 137
140static int ide_timing_compute(ide_drive_t *drive, short speed, struct ide_timing *t, int T, int UT) 138int ide_timing_compute(ide_drive_t *drive, u8 speed,
139 struct ide_timing *t, int T, int UT)
141{ 140{
142 struct hd_driveid *id = drive->id; 141 struct hd_driveid *id = drive->id;
143 struct ide_timing *s, p; 142 struct ide_timing *s, p;
144 143
145/* 144 /*
146 * Find the mode. 145 * Find the mode.
147 */ 146 */
148 147 s = ide_timing_find_mode(speed);
149 if (!(s = ide_timing_find_mode(speed))) 148 if (s == NULL)
150 return -EINVAL; 149 return -EINVAL;
151 150
152/* 151 /*
153 * Copy the timing from the table. 152 * Copy the timing from the table.
154 */ 153 */
155
156 *t = *s; 154 *t = *s;
157 155
158/* 156 /*
159 * If the drive is an EIDE drive, it can tell us it needs extended 157 * If the drive is an EIDE drive, it can tell us it needs extended
160 * PIO/MWDMA cycle timing. 158 * PIO/MWDMA cycle timing.
161 */ 159 */
162
163 if (id && id->field_valid & 2) { /* EIDE drive */ 160 if (id && id->field_valid & 2) { /* EIDE drive */
164 161
165 memset(&p, 0, sizeof(p)); 162 memset(&p, 0, sizeof(p));
166 163
167 switch (speed & XFER_MODE) { 164 if (speed <= XFER_PIO_2)
168 165 p.cycle = p.cyc8b = id->eide_pio;
169 case XFER_PIO: 166 else if (speed <= XFER_PIO_5)
170 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = id->eide_pio; 167 p.cycle = p.cyc8b = id->eide_pio_iordy;
171 else p.cycle = p.cyc8b = id->eide_pio_iordy; 168 else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
172 break; 169 p.cycle = id->eide_dma_min;
173
174 case XFER_MWDMA:
175 p.cycle = id->eide_dma_min;
176 break;
177 }
178 170
179 ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B); 171 ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B);
180 } 172 }
181 173
182/* 174 /*
183 * Convert the timing to bus clock counts. 175 * Convert the timing to bus clock counts.
184 */ 176 */
185
186 ide_timing_quantize(t, t, T, UT); 177 ide_timing_quantize(t, t, T, UT);
187 178
188/* 179 /*
189 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T 180 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
190 * and some other commands. We have to ensure that the DMA cycle timing is 181 * S.M.A.R.T and some other commands. We have to ensure that the
191 * slower/equal than the fastest PIO timing. 182 * DMA cycle timing is slower/equal than the fastest PIO timing.
192 */ 183 */
193 184 if (speed >= XFER_SW_DMA_0) {
194 if ((speed & XFER_MODE) != XFER_PIO) {
195 u8 pio = ide_get_best_pio_mode(drive, 255, 5); 185 u8 pio = ide_get_best_pio_mode(drive, 255, 5);
196 ide_timing_compute(drive, XFER_PIO_0 + pio, &p, T, UT); 186 ide_timing_compute(drive, XFER_PIO_0 + pio, &p, T, UT);
197 ide_timing_merge(&p, t, t, IDE_TIMING_ALL); 187 ide_timing_merge(&p, t, t, IDE_TIMING_ALL);
198 } 188 }
199 189
200/* 190 /*
201 * Lengthen active & recovery time so that cycle time is correct. 191 * Lengthen active & recovery time so that cycle time is correct.
202 */ 192 */
203
204 if (t->act8b + t->rec8b < t->cyc8b) { 193 if (t->act8b + t->rec8b < t->cyc8b) {
205 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 194 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
206 t->rec8b = t->cyc8b - t->act8b; 195 t->rec8b = t->cyc8b - t->act8b;
@@ -213,5 +202,4 @@ static int ide_timing_compute(ide_drive_t *drive, short speed, struct ide_timing
213 202
214 return 0; 203 return 0;
215} 204}
216 205EXPORT_SYMBOL_GPL(ide_timing_compute);
217#endif
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 2b8453510e09..d4a6b102a772 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -50,29 +50,16 @@
50#include <linux/types.h> 50#include <linux/types.h>
51#include <linux/string.h> 51#include <linux/string.h>
52#include <linux/kernel.h> 52#include <linux/kernel.h>
53#include <linux/timer.h>
54#include <linux/mm.h>
55#include <linux/interrupt.h> 53#include <linux/interrupt.h>
56#include <linux/major.h> 54#include <linux/major.h>
57#include <linux/errno.h> 55#include <linux/errno.h>
58#include <linux/genhd.h> 56#include <linux/genhd.h>
59#include <linux/blkpg.h>
60#include <linux/slab.h> 57#include <linux/slab.h>
61#include <linux/init.h> 58#include <linux/init.h>
62#include <linux/pci.h> 59#include <linux/pci.h>
63#include <linux/delay.h>
64#include <linux/ide.h> 60#include <linux/ide.h>
65#include <linux/completion.h> 61#include <linux/completion.h>
66#include <linux/reboot.h>
67#include <linux/cdrom.h>
68#include <linux/seq_file.h>
69#include <linux/device.h> 62#include <linux/device.h>
70#include <linux/bitops.h>
71
72#include <asm/byteorder.h>
73#include <asm/irq.h>
74#include <asm/uaccess.h>
75#include <asm/io.h>
76 63
77 64
78/* default maximum number of failures */ 65/* default maximum number of failures */
@@ -91,8 +78,6 @@ DEFINE_MUTEX(ide_cfg_mtx);
91__cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock); 78__cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock);
92EXPORT_SYMBOL(ide_lock); 79EXPORT_SYMBOL(ide_lock);
93 80
94ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
95
96static void ide_port_init_devices_data(ide_hwif_t *); 81static void ide_port_init_devices_data(ide_hwif_t *);
97 82
98/* 83/*
@@ -121,7 +106,6 @@ void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
121 106
122 ide_port_init_devices_data(hwif); 107 ide_port_init_devices_data(hwif);
123} 108}
124EXPORT_SYMBOL_GPL(ide_init_port_data);
125 109
126static void ide_port_init_devices_data(ide_hwif_t *hwif) 110static void ide_port_init_devices_data(ide_hwif_t *hwif)
127{ 111{
@@ -150,18 +134,6 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
150 } 134 }
151} 135}
152 136
153static void __init init_ide_data (void)
154{
155 unsigned int index;
156
157 /* Initialise all interface structures */
158 for (index = 0; index < MAX_HWIFS; ++index) {
159 ide_hwif_t *hwif = &ide_hwifs[index];
160
161 ide_init_port_data(hwif, index);
162 }
163}
164
165void ide_remove_port_from_hwgroup(ide_hwif_t *hwif) 137void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
166{ 138{
167 ide_hwgroup_t *hwgroup = hwif->hwgroup; 139 ide_hwgroup_t *hwgroup = hwif->hwgroup;
@@ -312,7 +284,8 @@ void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
312 memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports)); 284 memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
313 hwif->irq = hw->irq; 285 hwif->irq = hw->irq;
314 hwif->chipset = hw->chipset; 286 hwif->chipset = hw->chipset;
315 hwif->gendev.parent = hw->dev; 287 hwif->dev = hw->dev;
288 hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
316 hwif->ack_intr = hw->ack_intr; 289 hwif->ack_intr = hw->ack_intr;
317} 290}
318EXPORT_SYMBOL_GPL(ide_init_port_hw); 291EXPORT_SYMBOL_GPL(ide_init_port_hw);
@@ -556,6 +529,22 @@ static int generic_ide_resume(struct device *dev)
556 return err; 529 return err;
557} 530}
558 531
532static int generic_drive_reset(ide_drive_t *drive)
533{
534 struct request *rq;
535 int ret = 0;
536
537 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
538 rq->cmd_type = REQ_TYPE_SPECIAL;
539 rq->cmd_len = 1;
540 rq->cmd[0] = REQ_DRIVE_RESET;
541 rq->cmd_flags |= REQ_SOFTBARRIER;
542 if (blk_execute_rq(drive->queue, NULL, rq, 1))
543 ret = rq->errors;
544 blk_put_request(rq);
545 return ret;
546}
547
559int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device *bdev, 548int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device *bdev,
560 unsigned int cmd, unsigned long arg) 549 unsigned int cmd, unsigned long arg)
561{ 550{
@@ -630,33 +619,8 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
630 if (!capable(CAP_SYS_ADMIN)) 619 if (!capable(CAP_SYS_ADMIN))
631 return -EACCES; 620 return -EACCES;
632 621
633 /* 622 return generic_drive_reset(drive);
634 * Abort the current command on the
635 * group if there is one, taking
636 * care not to allow anything else
637 * to be queued and to die on the
638 * spot if we miss one somehow
639 */
640
641 spin_lock_irqsave(&ide_lock, flags);
642
643 if (HWGROUP(drive)->resetting) {
644 spin_unlock_irqrestore(&ide_lock, flags);
645 return -EBUSY;
646 }
647 623
648 ide_abort(drive, "drive reset");
649
650 BUG_ON(HWGROUP(drive)->handler);
651
652 /* Ensure nothing gets queued after we
653 drop the lock. Reset will clear the busy */
654
655 HWGROUP(drive)->busy = 1;
656 spin_unlock_irqrestore(&ide_lock, flags);
657 (void) ide_do_reset(drive);
658
659 return 0;
660 case HDIO_GET_BUSSTATE: 624 case HDIO_GET_BUSSTATE:
661 if (!capable(CAP_SYS_ADMIN)) 625 if (!capable(CAP_SYS_ADMIN))
662 return -EACCES; 626 return -EACCES;
@@ -1021,8 +985,6 @@ static int __init ide_init(void)
1021 goto out_port_class; 985 goto out_port_class;
1022 } 986 }
1023 987
1024 init_ide_data();
1025
1026 proc_ide_create(); 988 proc_ide_create();
1027 989
1028 return 0; 990 return 0;
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c
index 052125fafcfa..4ec19737f3c5 100644
--- a/drivers/ide/legacy/ali14xx.c
+++ b/drivers/ide/legacy/ali14xx.c
@@ -117,10 +117,11 @@ static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
117 u8 param1, param2, param3, param4; 117 u8 param1, param2, param3, param4;
118 unsigned long flags; 118 unsigned long flags;
119 int bus_speed = ide_vlb_clk ? ide_vlb_clk : 50; 119 int bus_speed = ide_vlb_clk ? ide_vlb_clk : 50;
120 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
120 121
121 /* calculate timing, according to PIO mode */ 122 /* calculate timing, according to PIO mode */
122 time1 = ide_pio_cycle_time(drive, pio); 123 time1 = ide_pio_cycle_time(drive, pio);
123 time2 = ide_pio_timings[pio].active_time; 124 time2 = t->active;
124 param3 = param1 = (time2 * bus_speed + 999) / 1000; 125 param3 = param1 = (time2 * bus_speed + 999) / 1000;
125 param4 = param2 = (time1 * bus_speed + 999) / 1000 - param1; 126 param4 = param2 = (time1 * bus_speed + 999) / 1000 - param1;
126 if (pio < 3) { 127 if (pio < 3) {
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
index 9a1d27ef3f8a..0497e7f85b09 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/legacy/buddha.c
@@ -227,7 +227,6 @@ fail_base2:
227 if (hwif) { 227 if (hwif) {
228 u8 index = hwif->index; 228 u8 index = hwif->index;
229 229
230 ide_init_port_data(hwif, index);
231 ide_init_port_hw(hwif, &hw); 230 ide_init_port_hw(hwif, &hw);
232 231
233 idx[i] = index; 232 idx[i] = index;
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index af11028b4794..129a812bb57f 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -111,7 +111,6 @@ static int __init falconide_init(void)
111 u8 index = hwif->index; 111 u8 index = hwif->index;
112 u8 idx[4] = { index, 0xff, 0xff, 0xff }; 112 u8 idx[4] = { index, 0xff, 0xff, 0xff };
113 113
114 ide_init_port_data(hwif, index);
115 ide_init_port_hw(hwif, &hw); 114 ide_init_port_hw(hwif, &hw);
116 115
117 /* Atari has a byte-swapped IDE interface */ 116 /* Atari has a byte-swapped IDE interface */
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index b78941680c32..7e74b20202df 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -185,7 +185,6 @@ found:
185 if (hwif) { 185 if (hwif) {
186 u8 index = hwif->index; 186 u8 index = hwif->index;
187 187
188 ide_init_port_data(hwif, index);
189 ide_init_port_hw(hwif, &hw); 188 ide_init_port_hw(hwif, &hw);
190 189
191 idx[i] = index; 190 idx[i] = index;
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
index dd6dfb32e853..7bc8fd59ea9e 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/legacy/ht6560b.c
@@ -216,6 +216,7 @@ static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio)
216 216
217 if (pio) { 217 if (pio) {
218 unsigned int cycle_time; 218 unsigned int cycle_time;
219 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
219 220
220 cycle_time = ide_pio_cycle_time(drive, pio); 221 cycle_time = ide_pio_cycle_time(drive, pio);
221 222
@@ -224,10 +225,8 @@ static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio)
224 * actual cycle time for recovery and activity 225 * actual cycle time for recovery and activity
225 * according system bus speed. 226 * according system bus speed.
226 */ 227 */
227 active_time = ide_pio_timings[pio].active_time; 228 active_time = t->active;
228 recovery_time = cycle_time 229 recovery_time = cycle_time - active_time - t->setup;
229 - active_time
230 - ide_pio_timings[pio].setup_time;
231 /* 230 /*
232 * Cycle times should be Vesa bus cycles 231 * Cycle times should be Vesa bus cycles
233 */ 232 */
@@ -311,16 +310,16 @@ static void ht6560b_set_pio_mode(ide_drive_t *drive, const u8 pio)
311#endif 310#endif
312} 311}
313 312
314static void __init ht6560b_port_init_devs(ide_hwif_t *hwif) 313static void __init ht6560b_init_dev(ide_drive_t *drive)
315{ 314{
315 ide_hwif_t *hwif = drive->hwif;
316 /* Setting default configurations for drives. */ 316 /* Setting default configurations for drives. */
317 int t = (HT_CONFIG_DEFAULT << 8) | HT_TIMING_DEFAULT; 317 int t = (HT_CONFIG_DEFAULT << 8) | HT_TIMING_DEFAULT;
318 318
319 if (hwif->channel) 319 if (hwif->channel)
320 t |= (HT_SECONDARY_IF << 8); 320 t |= (HT_SECONDARY_IF << 8);
321 321
322 hwif->drives[0].drive_data = t; 322 drive->drive_data = t;
323 hwif->drives[1].drive_data = t;
324} 323}
325 324
326static int probe_ht6560b; 325static int probe_ht6560b;
@@ -329,7 +328,7 @@ module_param_named(probe, probe_ht6560b, bool, 0);
329MODULE_PARM_DESC(probe, "probe for HT6560B chipset"); 328MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
330 329
331static const struct ide_port_ops ht6560b_port_ops = { 330static const struct ide_port_ops ht6560b_port_ops = {
332 .port_init_devs = ht6560b_port_init_devs, 331 .init_dev = ht6560b_init_dev,
333 .set_pio_mode = ht6560b_set_pio_mode, 332 .set_pio_mode = ht6560b_set_pio_mode,
334 .selectproc = ht6560b_selectproc, 333 .selectproc = ht6560b_selectproc,
335}; 334};
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/legacy/ide-4drives.c
index ecae916a3385..89c8ff0a4d08 100644
--- a/drivers/ide/legacy/ide-4drives.c
+++ b/drivers/ide/legacy/ide-4drives.c
@@ -11,6 +11,21 @@ static int probe_4drives;
11module_param_named(probe, probe_4drives, bool, 0); 11module_param_named(probe, probe_4drives, bool, 0);
12MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port"); 12MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
13 13
14static void ide_4drives_init_dev(ide_drive_t *drive)
15{
16 if (drive->hwif->channel)
17 drive->select.all ^= 0x20;
18}
19
20static const struct ide_port_ops ide_4drives_port_ops = {
21 .init_dev = ide_4drives_init_dev,
22};
23
24static const struct ide_port_info ide_4drives_port_info = {
25 .port_ops = &ide_4drives_port_ops,
26 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA,
27};
28
14static int __init ide_4drives_init(void) 29static int __init ide_4drives_init(void)
15{ 30{
16 ide_hwif_t *hwif, *mate; 31 ide_hwif_t *hwif, *mate;
@@ -49,18 +64,10 @@ static int __init ide_4drives_init(void)
49 mate = ide_find_port(); 64 mate = ide_find_port();
50 if (mate) { 65 if (mate) {
51 ide_init_port_hw(mate, &hw); 66 ide_init_port_hw(mate, &hw);
52 mate->drives[0].select.all ^= 0x20;
53 mate->drives[1].select.all ^= 0x20;
54 idx[1] = mate->index; 67 idx[1] = mate->index;
55
56 if (hwif) {
57 hwif->mate = mate;
58 mate->mate = hwif;
59 hwif->serialized = mate->serialized = 1;
60 }
61 } 68 }
62 69
63 ide_device_add(idx, NULL); 70 ide_device_add(idx, &ide_4drives_port_info);
64 71
65 return 0; 72 return 0;
66} 73}
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 8dbf4d9b6447..27b1e0b7ecb4 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -66,8 +66,6 @@ MODULE_LICENSE("Dual MPL/GPL");
66#ifdef CONFIG_PCMCIA_DEBUG 66#ifdef CONFIG_PCMCIA_DEBUG
67INT_MODULE_PARM(pc_debug, 0); 67INT_MODULE_PARM(pc_debug, 0);
68#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) 68#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
69/*static char *version =
70"ide-cs.c 1.3 2002/10/26 05:45:31 (David Hinds)";*/
71#else 69#else
72#define DEBUG(n, args...) 70#define DEBUG(n, args...)
73#endif 71#endif
@@ -154,6 +152,11 @@ static const struct ide_port_ops idecs_port_ops = {
154 .quirkproc = ide_undecoded_slave, 152 .quirkproc = ide_undecoded_slave,
155}; 153};
156 154
155static const struct ide_port_info idecs_port_info = {
156 .port_ops = &idecs_port_ops,
157 .host_flags = IDE_HFLAG_NO_DMA,
158};
159
157static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl, 160static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
158 unsigned long irq, struct pcmcia_device *handle) 161 unsigned long irq, struct pcmcia_device *handle)
159{ 162{
@@ -187,13 +190,11 @@ static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
187 190
188 i = hwif->index; 191 i = hwif->index;
189 192
190 ide_init_port_data(hwif, i);
191 ide_init_port_hw(hwif, &hw); 193 ide_init_port_hw(hwif, &hw);
192 hwif->port_ops = &idecs_port_ops;
193 194
194 idx[0] = i; 195 idx[0] = i;
195 196
196 ide_device_add(idx, NULL); 197 ide_device_add(idx, &idecs_port_info);
197 198
198 if (hwif->present) 199 if (hwif->present)
199 return hwif; 200 return hwif;
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index d3bc3f24e05d..a249562b34b5 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -44,6 +44,10 @@ static void __devinit plat_ide_setup_ports(hw_regs_t *hw,
44 hw->chipset = ide_generic; 44 hw->chipset = ide_generic;
45} 45}
46 46
47static const struct ide_port_info platform_ide_port_info = {
48 .host_flags = IDE_HFLAG_NO_DMA,
49};
50
47static int __devinit plat_ide_probe(struct platform_device *pdev) 51static int __devinit plat_ide_probe(struct platform_device *pdev)
48{ 52{
49 struct resource *res_base, *res_alt, *res_irq; 53 struct resource *res_base, *res_alt, *res_irq;
@@ -54,6 +58,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
54 int ret = 0; 58 int ret = 0;
55 int mmio = 0; 59 int mmio = 0;
56 hw_regs_t hw; 60 hw_regs_t hw;
61 struct ide_port_info d = platform_ide_port_info;
57 62
58 pdata = pdev->dev.platform_data; 63 pdata = pdev->dev.platform_data;
59 64
@@ -102,13 +107,13 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
102 ide_init_port_hw(hwif, &hw); 107 ide_init_port_hw(hwif, &hw);
103 108
104 if (mmio) { 109 if (mmio) {
105 hwif->host_flags = IDE_HFLAG_MMIO; 110 d.host_flags |= IDE_HFLAG_MMIO;
106 default_hwif_mmiops(hwif); 111 default_hwif_mmiops(hwif);
107 } 112 }
108 113
109 idx[0] = hwif->index; 114 idx[0] = hwif->index;
110 115
111 ide_device_add(idx, NULL); 116 ide_device_add(idx, &d);
112 117
113 platform_set_drvdata(pdev, hwif); 118 platform_set_drvdata(pdev, hwif);
114 119
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index 2e84290d0bcc..0a6195bcfeda 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -130,7 +130,6 @@ static int __init macide_init(void)
130 u8 index = hwif->index; 130 u8 index = hwif->index;
131 u8 idx[4] = { index, 0xff, 0xff, 0xff }; 131 u8 idx[4] = { index, 0xff, 0xff, 0xff };
132 132
133 ide_init_port_data(hwif, index);
134 ide_init_port_hw(hwif, &hw); 133 ide_init_port_hw(hwif, &hw);
135 134
136 ide_device_add(idx, NULL); 135 ide_device_add(idx, NULL);
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index 8ff6e2d20834..9c2b9d078f69 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -142,7 +142,6 @@ static int __init q40ide_init(void)
142 142
143 hwif = ide_find_port(); 143 hwif = ide_find_port();
144 if (hwif) { 144 if (hwif) {
145 ide_init_port_data(hwif, hwif->index);
146 ide_init_port_hw(hwif, &hw); 145 ide_init_port_hw(hwif, &hw);
147 146
148 /* Q40 has a byte-swapped IDE interface */ 147 /* Q40 has a byte-swapped IDE interface */
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c
index 51dba82f8812..2338f344ea24 100644
--- a/drivers/ide/legacy/qd65xx.c
+++ b/drivers/ide/legacy/qd65xx.c
@@ -207,6 +207,7 @@ static void qd6500_set_pio_mode(ide_drive_t *drive, const u8 pio)
207static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio) 207static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio)
208{ 208{
209 ide_hwif_t *hwif = drive->hwif; 209 ide_hwif_t *hwif = drive->hwif;
210 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
210 unsigned int cycle_time; 211 unsigned int cycle_time;
211 int active_time = 175; 212 int active_time = 175;
212 int recovery_time = 415; /* worst case values from the dos driver */ 213 int recovery_time = 415; /* worst case values from the dos driver */
@@ -236,7 +237,7 @@ static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio)
236 active_time = 110; 237 active_time = 110;
237 recovery_time = cycle_time - 120; 238 recovery_time = cycle_time - 120;
238 } else { 239 } else {
239 active_time = ide_pio_timings[pio].active_time; 240 active_time = t->active;
240 recovery_time = cycle_time - active_time; 241 recovery_time = cycle_time - active_time;
241 } 242 }
242 } 243 }
@@ -281,17 +282,18 @@ static int __init qd_testreg(int port)
281 return (readreg != QD_TESTVAL); 282 return (readreg != QD_TESTVAL);
282} 283}
283 284
284static void __init qd6500_port_init_devs(ide_hwif_t *hwif) 285static void __init qd6500_init_dev(ide_drive_t *drive)
285{ 286{
287 ide_hwif_t *hwif = drive->hwif;
286 u8 base = (hwif->config_data & 0xff00) >> 8; 288 u8 base = (hwif->config_data & 0xff00) >> 8;
287 u8 config = QD_CONFIG(hwif); 289 u8 config = QD_CONFIG(hwif);
288 290
289 hwif->drives[0].drive_data = QD6500_DEF_DATA; 291 drive->drive_data = QD6500_DEF_DATA;
290 hwif->drives[1].drive_data = QD6500_DEF_DATA;
291} 292}
292 293
293static void __init qd6580_port_init_devs(ide_hwif_t *hwif) 294static void __init qd6580_init_dev(ide_drive_t *drive)
294{ 295{
296 ide_hwif_t *hwif = drive->hwif;
295 u16 t1, t2; 297 u16 t1, t2;
296 u8 base = (hwif->config_data & 0xff00) >> 8; 298 u8 base = (hwif->config_data & 0xff00) >> 8;
297 u8 config = QD_CONFIG(hwif); 299 u8 config = QD_CONFIG(hwif);
@@ -302,18 +304,17 @@ static void __init qd6580_port_init_devs(ide_hwif_t *hwif)
302 } else 304 } else
303 t2 = t1 = hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA; 305 t2 = t1 = hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA;
304 306
305 hwif->drives[0].drive_data = t1; 307 drive->drive_data = drive->select.b.unit ? t2 : t1;
306 hwif->drives[1].drive_data = t2;
307} 308}
308 309
309static const struct ide_port_ops qd6500_port_ops = { 310static const struct ide_port_ops qd6500_port_ops = {
310 .port_init_devs = qd6500_port_init_devs, 311 .init_dev = qd6500_init_dev,
311 .set_pio_mode = qd6500_set_pio_mode, 312 .set_pio_mode = qd6500_set_pio_mode,
312 .selectproc = qd65xx_select, 313 .selectproc = qd65xx_select,
313}; 314};
314 315
315static const struct ide_port_ops qd6580_port_ops = { 316static const struct ide_port_ops qd6580_port_ops = {
316 .port_init_devs = qd6580_port_init_devs, 317 .init_dev = qd6580_init_dev,
317 .set_pio_mode = qd6580_set_pio_mode, 318 .set_pio_mode = qd6580_set_pio_mode,
318 .selectproc = qd65xx_select, 319 .selectproc = qd65xx_select,
319}; 320};
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 1a6c27b32498..48d57cae63c6 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -213,10 +213,8 @@ static int auide_build_dmatable(ide_drive_t *drive)
213{ 213{
214 int i, iswrite, count = 0; 214 int i, iswrite, count = 0;
215 ide_hwif_t *hwif = HWIF(drive); 215 ide_hwif_t *hwif = HWIF(drive);
216
217 struct request *rq = HWGROUP(drive)->rq; 216 struct request *rq = HWGROUP(drive)->rq;
218 217 _auide_hwif *ahwif = &auide_hwif;
219 _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;
220 struct scatterlist *sg; 218 struct scatterlist *sg;
221 219
222 iswrite = (rq_data_dir(rq) == WRITE); 220 iswrite = (rq_data_dir(rq) == WRITE);
@@ -402,7 +400,7 @@ static const struct ide_dma_ops au1xxx_dma_ops = {
402 400
403static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d) 401static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
404{ 402{
405 _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data; 403 _auide_hwif *auide = &auide_hwif;
406 dbdev_tab_t source_dev_tab, target_dev_tab; 404 dbdev_tab_t source_dev_tab, target_dev_tab;
407 u32 dev_id, tsize, devwidth, flags; 405 u32 dev_id, tsize, devwidth, flags;
408 406
@@ -463,7 +461,7 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
463#else 461#else
464static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d) 462static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
465{ 463{
466 _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data; 464 _auide_hwif *auide = &auide_hwif;
467 dbdev_tab_t source_dev_tab; 465 dbdev_tab_t source_dev_tab;
468 int flags; 466 int flags;
469 467
@@ -600,8 +598,6 @@ static int au_ide_probe(struct device *dev)
600 598
601 ide_init_port_hw(hwif, &hw); 599 ide_init_port_hw(hwif, &hw);
602 600
603 hwif->dev = dev;
604
605 /* If the user has selected DDMA assisted copies, 601 /* If the user has selected DDMA assisted copies,
606 then set up a few local I/O function entry points 602 then set up a few local I/O function entry points
607 */ 603 */
@@ -610,11 +606,8 @@ static int au_ide_probe(struct device *dev)
610 hwif->input_data = au1xxx_input_data; 606 hwif->input_data = au1xxx_input_data;
611 hwif->output_data = au1xxx_output_data; 607 hwif->output_data = au1xxx_output_data;
612#endif 608#endif
613 hwif->select_data = 0; /* no chipset-specific code */
614 hwif->config_data = 0; /* no chipset-specific code */
615 609
616 auide_hwif.hwif = hwif; 610 auide_hwif.hwif = hwif;
617 hwif->hwif_data = &auide_hwif;
618 611
619 idx[0] = hwif->index; 612 idx[0] = hwif->index;
620 613
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 52fee3d2771a..9f1212cc4aed 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -61,6 +61,11 @@ static struct resource swarm_ide_resource = {
61 61
62static struct platform_device *swarm_ide_dev; 62static struct platform_device *swarm_ide_dev;
63 63
64static const struct ide_port_info swarm_port_info = {
65 .name = DRV_NAME,
66 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
67};
68
64/* 69/*
65 * swarm_ide_probe - if the board header indicates the existence of 70 * swarm_ide_probe - if the board header indicates the existence of
66 * Generic Bus IDE, allocate a HWIF for it. 71 * Generic Bus IDE, allocate a HWIF for it.
@@ -77,12 +82,6 @@ static int __devinit swarm_ide_probe(struct device *dev)
77 if (!SIBYTE_HAVE_IDE) 82 if (!SIBYTE_HAVE_IDE)
78 return -ENODEV; 83 return -ENODEV;
79 84
80 hwif = ide_find_port();
81 if (hwif == NULL) {
82 printk(KERN_ERR DRV_NAME ": no free slot for interface\n");
83 return -ENOMEM;
84 }
85
86 base = ioremap(A_IO_EXT_BASE, 0x800); 85 base = ioremap(A_IO_EXT_BASE, 0x800);
87 offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS)); 86 offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS));
88 size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS)); 87 size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS));
@@ -109,10 +108,6 @@ static int __devinit swarm_ide_probe(struct device *dev)
109 108
110 base = ioremap(offset, size); 109 base = ioremap(offset, size);
111 110
112 /* Setup MMIO ops. */
113 hwif->host_flags = IDE_HFLAG_MMIO;
114 default_hwif_mmiops(hwif);
115
116 for (i = 0; i <= 7; i++) 111 for (i = 0; i <= 7; i++)
117 hw.io_ports_array[i] = 112 hw.io_ports_array[i] =
118 (unsigned long)(base + ((0x1f0 + i) << 5)); 113 (unsigned long)(base + ((0x1f0 + i) << 5));
@@ -121,15 +116,26 @@ static int __devinit swarm_ide_probe(struct device *dev)
121 hw.irq = K_INT_GB_IDE; 116 hw.irq = K_INT_GB_IDE;
122 hw.chipset = ide_generic; 117 hw.chipset = ide_generic;
123 118
119 hwif = ide_find_port_slot(&swarm_port_info);
120 if (hwif == NULL)
121 goto err;
122
124 ide_init_port_hw(hwif, &hw); 123 ide_init_port_hw(hwif, &hw);
125 124
125 /* Setup MMIO ops. */
126 default_hwif_mmiops(hwif);
127
126 idx[0] = hwif->index; 128 idx[0] = hwif->index;
127 129
128 ide_device_add(idx, NULL); 130 ide_device_add(idx, &swarm_port_info);
129 131
130 dev_set_drvdata(dev, hwif); 132 dev_set_drvdata(dev, hwif);
131 133
132 return 0; 134 return 0;
135err:
136 release_resource(&swarm_ide_resource);
137 iounmap(base);
138 return -ENOMEM;
133} 139}
134 140
135static struct device_driver swarm_ide_driver = { 141static struct device_driver swarm_ide_driver = {
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index f2de00adf147..80d19c0eb780 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -69,7 +69,8 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio)
69{ 69{
70 ide_hwif_t *hwif = HWIF(drive); 70 ide_hwif_t *hwif = HWIF(drive);
71 struct pci_dev *dev = to_pci_dev(hwif->dev); 71 struct pci_dev *dev = to_pci_dev(hwif->dev);
72 int s_time, a_time, c_time; 72 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
73 int s_time = t->setup, a_time = t->active, c_time = t->cycle;
73 u8 s_clc, a_clc, r_clc; 74 u8 s_clc, a_clc, r_clc;
74 unsigned long flags; 75 unsigned long flags;
75 int bus_speed = ide_pci_clk ? ide_pci_clk : 33; 76 int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
@@ -78,13 +79,10 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio)
78 u8 cd_dma_fifo = 0; 79 u8 cd_dma_fifo = 0;
79 int unit = drive->select.b.unit & 1; 80 int unit = drive->select.b.unit & 1;
80 81
81 s_time = ide_pio_timings[pio].setup_time;
82 a_time = ide_pio_timings[pio].active_time;
83 if ((s_clc = (s_time * bus_speed + 999) / 1000) >= 8) 82 if ((s_clc = (s_time * bus_speed + 999) / 1000) >= 8)
84 s_clc = 0; 83 s_clc = 0;
85 if ((a_clc = (a_time * bus_speed + 999) / 1000) >= 8) 84 if ((a_clc = (a_time * bus_speed + 999) / 1000) >= 8)
86 a_clc = 0; 85 a_clc = 0;
87 c_time = ide_pio_timings[pio].cycle_time;
88 86
89 if (!(r_clc = (c_time * bus_speed + 999) / 1000 - a_clc - s_clc)) { 87 if (!(r_clc = (c_time * bus_speed + 999) / 1000 - a_clc - s_clc)) {
90 r_clc = 1; 88 r_clc = 1;
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index ad222206a429..0bfcdd0e77b3 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -21,8 +21,6 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/ide.h> 22#include <linux/ide.h>
23 23
24#include "ide-timing.h"
25
26enum { 24enum {
27 AMD_IDE_CONFIG = 0x41, 25 AMD_IDE_CONFIG = 0x41,
28 AMD_CABLE_DETECT = 0x42, 26 AMD_CABLE_DETECT = 0x42,
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
index cd1ba14984ab..1ad1e23e3105 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/pci/cmd640.c
@@ -521,6 +521,7 @@ static void program_drive_counts(ide_drive_t *drive, unsigned int index)
521static void cmd640_set_mode(ide_drive_t *drive, unsigned int index, 521static void cmd640_set_mode(ide_drive_t *drive, unsigned int index,
522 u8 pio_mode, unsigned int cycle_time) 522 u8 pio_mode, unsigned int cycle_time)
523{ 523{
524 struct ide_timing *t;
524 int setup_time, active_time, recovery_time, clock_time; 525 int setup_time, active_time, recovery_time, clock_time;
525 u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count; 526 u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count;
526 int bus_speed; 527 int bus_speed;
@@ -532,8 +533,11 @@ static void cmd640_set_mode(ide_drive_t *drive, unsigned int index,
532 533
533 if (pio_mode > 5) 534 if (pio_mode > 5)
534 pio_mode = 5; 535 pio_mode = 5;
535 setup_time = ide_pio_timings[pio_mode].setup_time; 536
536 active_time = ide_pio_timings[pio_mode].active_time; 537 t = ide_timing_find_mode(XFER_PIO_0 + pio_mode);
538 setup_time = t->setup;
539 active_time = t->active;
540
537 recovery_time = cycle_time - (setup_time + active_time); 541 recovery_time = cycle_time - (setup_time + active_time);
538 clock_time = 1000 / bus_speed; 542 clock_time = 1000 / bus_speed;
539 cycle_count = DIV_ROUND_UP(cycle_time, clock_time); 543 cycle_count = DIV_ROUND_UP(cycle_time, clock_time);
@@ -607,11 +611,40 @@ static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
607 611
608 display_clocks(index); 612 display_clocks(index);
609} 613}
614#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
615
616static void cmd640_init_dev(ide_drive_t *drive)
617{
618 unsigned int i = drive->hwif->channel * 2 + drive->select.b.unit;
619
620#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
621 /*
622 * Reset timing to the slowest speed and turn off prefetch.
623 * This way, the drive identify code has a better chance.
624 */
625 setup_counts[i] = 4; /* max possible */
626 active_counts[i] = 16; /* max possible */
627 recovery_counts[i] = 16; /* max possible */
628 program_drive_counts(drive, i);
629 set_prefetch_mode(drive, i, 0);
630 printk(KERN_INFO DRV_NAME ": drive%d timings/prefetch cleared\n", i);
631#else
632 /*
633 * Set the drive unmask flags to match the prefetch setting.
634 */
635 check_prefetch(drive, i);
636 printk(KERN_INFO DRV_NAME ": drive%d timings/prefetch(%s) preserved\n",
637 i, drive->no_io_32bit ? "off" : "on");
638#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
639}
640
610 641
611static const struct ide_port_ops cmd640_port_ops = { 642static const struct ide_port_ops cmd640_port_ops = {
643 .init_dev = cmd640_init_dev,
644#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
612 .set_pio_mode = cmd640_set_pio_mode, 645 .set_pio_mode = cmd640_set_pio_mode,
646#endif
613}; 647};
614#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
615 648
616static int pci_conf1(void) 649static int pci_conf1(void)
617{ 650{
@@ -654,10 +687,8 @@ static const struct ide_port_info cmd640_port_info __initdata = {
654 IDE_HFLAG_NO_DMA | 687 IDE_HFLAG_NO_DMA |
655 IDE_HFLAG_ABUSE_PREFETCH | 688 IDE_HFLAG_ABUSE_PREFETCH |
656 IDE_HFLAG_ABUSE_FAST_DEVSEL, 689 IDE_HFLAG_ABUSE_FAST_DEVSEL,
657#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
658 .port_ops = &cmd640_port_ops, 690 .port_ops = &cmd640_port_ops,
659 .pio_mask = ATA_PIO5, 691 .pio_mask = ATA_PIO5,
660#endif
661}; 692};
662 693
663static int cmd640x_init_one(unsigned long base, unsigned long ctl) 694static int cmd640x_init_one(unsigned long base, unsigned long ctl)
@@ -683,12 +714,8 @@ static int cmd640x_init_one(unsigned long base, unsigned long ctl)
683 */ 714 */
684static int __init cmd640x_init(void) 715static int __init cmd640x_init(void)
685{ 716{
686#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
687 int second_port_toggled = 0;
688#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
689 int second_port_cmd640 = 0, rc; 717 int second_port_cmd640 = 0, rc;
690 const char *bus_type, *port2; 718 const char *bus_type, *port2;
691 unsigned int index;
692 u8 b, cfr; 719 u8 b, cfr;
693 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 720 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
694 hw_regs_t hw[2]; 721 hw_regs_t hw[2];
@@ -774,88 +801,44 @@ static int __init cmd640x_init(void)
774 put_cmd640_reg(CMDTIM, 0); 801 put_cmd640_reg(CMDTIM, 0);
775 put_cmd640_reg(BRST, 0x40); 802 put_cmd640_reg(BRST, 0x40);
776 803
777 cmd_hwif1 = ide_find_port(); 804 b = get_cmd640_reg(CNTRL);
778 805
779 /* 806 /*
780 * Try to enable the secondary interface, if not already enabled 807 * Try to enable the secondary interface, if not already enabled
781 */ 808 */
782 if (cmd_hwif1 && 809 if (secondary_port_responding()) {
783 cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe) { 810 if ((b & CNTRL_ENA_2ND)) {
784 port2 = "not probed"; 811 second_port_cmd640 = 1;
812 port2 = "okay";
813 } else if (cmd640_vlb) {
814 second_port_cmd640 = 1;
815 port2 = "alive";
816 } else
817 port2 = "not cmd640";
785 } else { 818 } else {
786 b = get_cmd640_reg(CNTRL); 819 put_cmd640_reg(CNTRL, b ^ CNTRL_ENA_2ND); /* toggle the bit */
787 if (secondary_port_responding()) { 820 if (secondary_port_responding()) {
788 if ((b & CNTRL_ENA_2ND)) { 821 second_port_cmd640 = 1;
789 second_port_cmd640 = 1; 822 port2 = "enabled";
790 port2 = "okay";
791 } else if (cmd640_vlb) {
792 second_port_cmd640 = 1;
793 port2 = "alive";
794 } else
795 port2 = "not cmd640";
796 } else { 823 } else {
797 put_cmd640_reg(CNTRL, b ^ CNTRL_ENA_2ND); /* toggle the bit */ 824 put_cmd640_reg(CNTRL, b); /* restore original setting */
798 if (secondary_port_responding()) { 825 port2 = "not responding";
799 second_port_cmd640 = 1;
800#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
801 second_port_toggled = 1;
802#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
803 port2 = "enabled";
804 } else {
805 put_cmd640_reg(CNTRL, b); /* restore original setting */
806 port2 = "not responding";
807 }
808 } 826 }
809 } 827 }
810 828
811 /* 829 /*
812 * Initialize data for secondary cmd640 port, if enabled 830 * Initialize data for secondary cmd640 port, if enabled
813 */ 831 */
814 if (second_port_cmd640 && cmd_hwif1) { 832 if (second_port_cmd640) {
815 ide_init_port_hw(cmd_hwif1, &hw[1]); 833 cmd_hwif1 = ide_find_port();
816 idx[1] = cmd_hwif1->index; 834 if (cmd_hwif1) {
835 ide_init_port_hw(cmd_hwif1, &hw[1]);
836 idx[1] = cmd_hwif1->index;
837 }
817 } 838 }
818 printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n", 839 printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n",
819 second_port_cmd640 ? "" : "not ", port2); 840 second_port_cmd640 ? "" : "not ", port2);
820 841
821 /*
822 * Establish initial timings/prefetch for all drives.
823 * Do not unnecessarily disturb any prior BIOS setup of these.
824 */
825 for (index = 0; index < (2 + (second_port_cmd640 << 1)); index++) {
826 ide_drive_t *drive;
827
828 if (index > 1) {
829 if (cmd_hwif1 == NULL)
830 continue;
831 drive = &cmd_hwif1->drives[index & 1];
832 } else {
833 if (cmd_hwif0 == NULL)
834 continue;
835 drive = &cmd_hwif0->drives[index & 1];
836 }
837
838#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
839 /*
840 * Reset timing to the slowest speed and turn off prefetch.
841 * This way, the drive identify code has a better chance.
842 */
843 setup_counts [index] = 4; /* max possible */
844 active_counts [index] = 16; /* max possible */
845 recovery_counts [index] = 16; /* max possible */
846 program_drive_counts(drive, index);
847 set_prefetch_mode(drive, index, 0);
848 printk("cmd640: drive%d timings/prefetch cleared\n", index);
849#else
850 /*
851 * Set the drive unmask flags to match the prefetch setting
852 */
853 check_prefetch(drive, index);
854 printk("cmd640: drive%d timings/prefetch(%s) preserved\n",
855 index, drive->no_io_32bit ? "off" : "on");
856#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
857 }
858
859#ifdef CMD640_DUMP_REGS 842#ifdef CMD640_DUMP_REGS
860 cmd640_dump_regs(); 843 cmd640_dump_regs();
861#endif 844#endif
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index ca4774aa27ee..cfa784bacf48 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -116,6 +116,7 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
116{ 116{
117 ide_hwif_t *hwif = HWIF(drive); 117 ide_hwif_t *hwif = HWIF(drive);
118 struct pci_dev *dev = to_pci_dev(hwif->dev); 118 struct pci_dev *dev = to_pci_dev(hwif->dev);
119 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
119 unsigned int cycle_time; 120 unsigned int cycle_time;
120 u8 setup_count, arttim = 0; 121 u8 setup_count, arttim = 0;
121 122
@@ -124,10 +125,9 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
124 125
125 cycle_time = ide_pio_cycle_time(drive, pio); 126 cycle_time = ide_pio_cycle_time(drive, pio);
126 127
127 program_cycle_times(drive, cycle_time, 128 program_cycle_times(drive, cycle_time, t->active);
128 ide_pio_timings[pio].active_time);
129 129
130 setup_count = quantize_timing(ide_pio_timings[pio].setup_time, 130 setup_count = quantize_timing(t->setup,
131 1000 / (ide_pci_clk ? ide_pci_clk : 33)); 131 1000 / (ide_pci_clk ? ide_pci_clk : 33));
132 132
133 /* 133 /*
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index 99fe91a191b8..dc97c48623f3 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -26,8 +26,6 @@
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/ide.h> 27#include <linux/ide.h>
28 28
29#include "ide-timing.h"
30
31#define MSR_ATAC_BASE 0x51300000 29#define MSR_ATAC_BASE 0x51300000
32#define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0) 30#define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0)
33#define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01) 31#define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01)
@@ -75,13 +73,11 @@ static unsigned int cs5535_udma_timings[5] =
75 */ 73 */
76static void cs5535_set_speed(ide_drive_t *drive, const u8 speed) 74static void cs5535_set_speed(ide_drive_t *drive, const u8 speed)
77{ 75{
78
79 u32 reg = 0, dummy; 76 u32 reg = 0, dummy;
80 int unit = drive->select.b.unit; 77 int unit = drive->select.b.unit;
81 78
82
83 /* Set the PIO timings */ 79 /* Set the PIO timings */
84 if ((speed & XFER_MODE) == XFER_PIO) { 80 if (speed < XFER_SW_DMA_0) {
85 ide_drive_t *pair = ide_get_paired_drive(drive); 81 ide_drive_t *pair = ide_get_paired_drive(drive);
86 u8 cmd, pioa; 82 u8 cmd, pioa;
87 83
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
index 8c534afcb6c8..e14ad5530fa4 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/pci/cy82c693.c
@@ -133,6 +133,7 @@ static int calc_clk(int time, int bus_speed)
133 */ 133 */
134static void compute_clocks(u8 pio, pio_clocks_t *p_pclk) 134static void compute_clocks(u8 pio, pio_clocks_t *p_pclk)
135{ 135{
136 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
136 int clk1, clk2; 137 int clk1, clk2;
137 int bus_speed = ide_pci_clk ? ide_pci_clk : 33; 138 int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
138 139
@@ -141,15 +142,13 @@ static void compute_clocks(u8 pio, pio_clocks_t *p_pclk)
141 */ 142 */
142 143
143 /* let's calc the address setup time clocks */ 144 /* let's calc the address setup time clocks */
144 p_pclk->address_time = (u8)calc_clk(ide_pio_timings[pio].setup_time, bus_speed); 145 p_pclk->address_time = (u8)calc_clk(t->setup, bus_speed);
145 146
146 /* let's calc the active and recovery time clocks */ 147 /* let's calc the active and recovery time clocks */
147 clk1 = calc_clk(ide_pio_timings[pio].active_time, bus_speed); 148 clk1 = calc_clk(t->active, bus_speed);
148 149
149 /* calc recovery timing */ 150 /* calc recovery timing */
150 clk2 = ide_pio_timings[pio].cycle_time - 151 clk2 = t->cycle - t->active - t->setup;
151 ide_pio_timings[pio].active_time -
152 ide_pio_timings[pio].setup_time;
153 152
154 clk2 = calc_clk(clk2, bus_speed); 153 clk2 = calc_clk(clk2, bus_speed);
155 154
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
index af0f30051d5a..0106e2a2df77 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/pci/delkin_cb.c
@@ -93,7 +93,6 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
93 93
94 i = hwif->index; 94 i = hwif->index;
95 95
96 ide_init_port_data(hwif, i);
97 ide_init_port_hw(hwif, &hw); 96 ide_init_port_hw(hwif, &hw);
98 97
99 idx[0] = i; 98 idx[0] = i;
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 6ab04115286b..cbf647202994 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -512,8 +512,14 @@ static void __devinit it821x_quirkproc(ide_drive_t *drive)
512} 512}
513 513
514static struct ide_dma_ops it821x_pass_through_dma_ops = { 514static struct ide_dma_ops it821x_pass_through_dma_ops = {
515 .dma_host_set = ide_dma_host_set,
516 .dma_setup = ide_dma_setup,
517 .dma_exec_cmd = ide_dma_exec_cmd,
515 .dma_start = it821x_dma_start, 518 .dma_start = it821x_dma_start,
516 .dma_end = it821x_dma_end, 519 .dma_end = it821x_dma_end,
520 .dma_test_irq = ide_dma_test_irq,
521 .dma_timeout = ide_dma_timeout,
522 .dma_lost_irq = ide_dma_lost_irq,
517}; 523};
518 524
519/** 525/**
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 1584ebb6a185..789c66dfbde5 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -558,12 +558,9 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
558 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 558 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
559 int i; 559 int i;
560 560
561 hwif = ide_find_port(); 561 hwif = ide_find_port_slot(d);
562 if (hwif == NULL) { 562 if (hwif == NULL)
563 printk(KERN_ERR "%s: too many IDE interfaces, "
564 "no room in table\n", SCC_PATA_NAME);
565 return -ENOMEM; 563 return -ENOMEM;
566 }
567 564
568 memset(&hw, 0, sizeof(hw)); 565 memset(&hw, 0, sizeof(hw));
569 for (i = 0; i <= 8; i++) 566 for (i = 0; i <= 8; i++)
@@ -572,7 +569,6 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
572 hw.dev = &dev->dev; 569 hw.dev = &dev->dev;
573 hw.chipset = ide_pci; 570 hw.chipset = ide_pci;
574 ide_init_port_hw(hwif, &hw); 571 ide_init_port_hw(hwif, &hw);
575 hwif->dev = &dev->dev;
576 572
577 idx[0] = hwif->index; 573 idx[0] = hwif->index;
578 574
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 24513e3dcd6b..c79ff5b41088 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -568,6 +568,7 @@ static const struct ide_dma_ops sgiioc4_dma_ops = {
568}; 568};
569 569
570static const struct ide_port_info sgiioc4_port_info __devinitdata = { 570static const struct ide_port_info sgiioc4_port_info __devinitdata = {
571 .name = DRV_NAME,
571 .chipset = ide_pci, 572 .chipset = ide_pci,
572 .init_dma = ide_dma_sgiioc4, 573 .init_dma = ide_dma_sgiioc4,
573 .port_ops = &sgiioc4_port_ops, 574 .port_ops = &sgiioc4_port_ops,
@@ -587,13 +588,6 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
587 hw_regs_t hw; 588 hw_regs_t hw;
588 struct ide_port_info d = sgiioc4_port_info; 589 struct ide_port_info d = sgiioc4_port_info;
589 590
590 hwif = ide_find_port();
591 if (hwif == NULL) {
592 printk(KERN_ERR "%s: too many IDE interfaces, no room in table\n",
593 DRV_NAME);
594 return -ENOMEM;
595 }
596
597 /* Get the CmdBlk and CtrlBlk Base Registers */ 591 /* Get the CmdBlk and CtrlBlk Base Registers */
598 bar0 = pci_resource_start(dev, 0); 592 bar0 = pci_resource_start(dev, 0);
599 virt_base = ioremap(bar0, pci_resource_len(dev, 0)); 593 virt_base = ioremap(bar0, pci_resource_len(dev, 0));
@@ -608,11 +602,11 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
608 602
609 cmd_phys_base = bar0 + IOC4_CMD_OFFSET; 603 cmd_phys_base = bar0 + IOC4_CMD_OFFSET;
610 if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE, 604 if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE,
611 hwif->name)) { 605 DRV_NAME)) {
612 printk(KERN_ERR 606 printk(KERN_ERR
613 "%s : %s -- ERROR, Addresses " 607 "%s : %s -- ERROR, Addresses "
614 "0x%p to 0x%p ALREADY in use\n", 608 "0x%p to 0x%p ALREADY in use\n",
615 __func__, hwif->name, (void *) cmd_phys_base, 609 __func__, DRV_NAME, (void *) cmd_phys_base,
616 (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE); 610 (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
617 return -ENOMEM; 611 return -ENOMEM;
618 } 612 }
@@ -623,9 +617,12 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
623 hw.irq = dev->irq; 617 hw.irq = dev->irq;
624 hw.chipset = ide_pci; 618 hw.chipset = ide_pci;
625 hw.dev = &dev->dev; 619 hw.dev = &dev->dev;
626 ide_init_port_hw(hwif, &hw);
627 620
628 hwif->dev = &dev->dev; 621 hwif = ide_find_port_slot(&d);
622 if (hwif == NULL)
623 goto err;
624
625 ide_init_port_hw(hwif, &hw);
629 626
630 /* The IOC4 uses MMIO rather than Port IO. */ 627 /* The IOC4 uses MMIO rather than Port IO. */
631 default_hwif_mmiops(hwif); 628 default_hwif_mmiops(hwif);
@@ -641,6 +638,10 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
641 return -EIO; 638 return -EIO;
642 639
643 return 0; 640 return 0;
641err:
642 release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE);
643 iounmap(virt_base);
644 return -ENOMEM;
644} 645}
645 646
646static unsigned int __devinit 647static unsigned int __devinit
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index b75e9bb390a7..6e9d7655d89c 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -421,8 +421,7 @@ static int sil_sata_reset_poll(ide_drive_t *drive)
421 if ((sata_stat & 0x03) != 0x03) { 421 if ((sata_stat & 0x03) != 0x03) {
422 printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n", 422 printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n",
423 hwif->name, sata_stat); 423 hwif->name, sata_stat);
424 HWGROUP(drive)->polling = 0; 424 return -ENXIO;
425 return ide_started;
426 } 425 }
427 } 426 }
428 427
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index e127eb25ab63..2389945ca95d 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -52,8 +52,6 @@
52#include <linux/init.h> 52#include <linux/init.h>
53#include <linux/ide.h> 53#include <linux/ide.h>
54 54
55#include "ide-timing.h"
56
57/* registers layout and init values are chipset family dependant */ 55/* registers layout and init values are chipset family dependant */
58 56
59#define ATA_16 0x01 57#define ATA_16 0x01
@@ -616,7 +614,6 @@ MODULE_LICENSE("GPL");
616/* 614/*
617 * TODO: 615 * TODO:
618 * - CLEANUP 616 * - CLEANUP
619 * - Use drivers/ide/ide-timing.h !
620 * - More checks in the config registers (force values instead of 617 * - More checks in the config registers (force values instead of
621 * relying on the BIOS setting them correctly). 618 * relying on the BIOS setting them correctly).
622 * - Further optimisations ? 619 * - Further optimisations ?
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index ce84fa045d39..6efbde297174 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -47,10 +47,11 @@
47 */ 47 */
48static unsigned int get_pio_timings(ide_drive_t *drive, u8 pio) 48static unsigned int get_pio_timings(ide_drive_t *drive, u8 pio)
49{ 49{
50 struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
50 unsigned int cmd_on, cmd_off; 51 unsigned int cmd_on, cmd_off;
51 u8 iordy = 0; 52 u8 iordy = 0;
52 53
53 cmd_on = (ide_pio_timings[pio].active_time + 29) / 30; 54 cmd_on = (t->active + 29) / 30;
54 cmd_off = (ide_pio_cycle_time(drive, pio) - 30 * cmd_on + 29) / 30; 55 cmd_off = (ide_pio_cycle_time(drive, pio) - 30 * cmd_on + 29) / 30;
55 56
56 if (cmd_on == 0) 57 if (cmd_on == 0)
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index 3ed9728abd24..e47384c70c40 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -35,8 +35,6 @@
35#include <asm/processor.h> 35#include <asm/processor.h>
36#endif 36#endif
37 37
38#include "ide-timing.h"
39
40#define VIA_IDE_ENABLE 0x40 38#define VIA_IDE_ENABLE 0x40
41#define VIA_IDE_CONFIG 0x41 39#define VIA_IDE_CONFIG 0x41
42#define VIA_FIFO_CONFIG 0x43 40#define VIA_FIFO_CONFIG 0x43
diff --git a/drivers/ide/ppc/Makefile b/drivers/ide/ppc/Makefile
index 65af5848b28c..74e52adcdf4b 100644
--- a/drivers/ide/ppc/Makefile
+++ b/drivers/ide/ppc/Makefile
@@ -1,3 +1,2 @@
1 1
2obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o 2obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o
3obj-$(CONFIG_BLK_DEV_MPC8xx_IDE) += mpc8xx.o
diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c
deleted file mode 100644
index 236f9c38e519..000000000000
--- a/drivers/ide/ppc/mpc8xx.c
+++ /dev/null
@@ -1,851 +0,0 @@
1/*
2 * Copyright (C) 2000, 2001 Wolfgang Denk, wd@denx.de
3 * Modified for direct IDE interface
4 * by Thomas Lange, thomas@corelatus.com
5 * Modified for direct IDE interface on 8xx without using the PCMCIA
6 * controller
7 * by Steven.Scholz@imc-berlin.de
8 * Moved out of arch/ppc/kernel/m8xx_setup.c, other minor cleanups
9 * by Mathew Locke <mattl@mvista.com>
10 */
11
12#include <linux/errno.h>
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <linux/stddef.h>
16#include <linux/unistd.h>
17#include <linux/ptrace.h>
18#include <linux/slab.h>
19#include <linux/user.h>
20#include <linux/tty.h>
21#include <linux/major.h>
22#include <linux/interrupt.h>
23#include <linux/reboot.h>
24#include <linux/init.h>
25#include <linux/ioport.h>
26#include <linux/ide.h>
27#include <linux/bootmem.h>
28
29#include <asm/mpc8xx.h>
30#include <asm/mmu.h>
31#include <asm/processor.h>
32#include <asm/io.h>
33#include <asm/pgtable.h>
34#include <asm/ide.h>
35#include <asm/8xx_immap.h>
36#include <asm/machdep.h>
37#include <asm/irq.h>
38
39#define DRV_NAME "ide-mpc8xx"
40
41static int identify (volatile u8 *p);
42static void print_fixed (volatile u8 *p);
43static void print_funcid (int func);
44static int check_ide_device (unsigned long base);
45
46static void ide_interrupt_ack (void *dev);
47static void m8xx_ide_set_pio_mode(ide_drive_t *drive, const u8 pio);
48
49typedef struct ide_ioport_desc {
50 unsigned long base_off; /* Offset to PCMCIA memory */
51 unsigned long reg_off[IDE_NR_PORTS]; /* controller register offsets */
52 int irq; /* IRQ */
53} ide_ioport_desc_t;
54
55ide_ioport_desc_t ioport_dsc[MAX_HWIFS] = {
56#ifdef IDE0_BASE_OFFSET
57 { IDE0_BASE_OFFSET,
58 {
59 IDE0_DATA_REG_OFFSET,
60 IDE0_ERROR_REG_OFFSET,
61 IDE0_NSECTOR_REG_OFFSET,
62 IDE0_SECTOR_REG_OFFSET,
63 IDE0_LCYL_REG_OFFSET,
64 IDE0_HCYL_REG_OFFSET,
65 IDE0_SELECT_REG_OFFSET,
66 IDE0_STATUS_REG_OFFSET,
67 IDE0_CONTROL_REG_OFFSET,
68 IDE0_IRQ_REG_OFFSET,
69 },
70 IDE0_INTERRUPT,
71 },
72#ifdef IDE1_BASE_OFFSET
73 { IDE1_BASE_OFFSET,
74 {
75 IDE1_DATA_REG_OFFSET,
76 IDE1_ERROR_REG_OFFSET,
77 IDE1_NSECTOR_REG_OFFSET,
78 IDE1_SECTOR_REG_OFFSET,
79 IDE1_LCYL_REG_OFFSET,
80 IDE1_HCYL_REG_OFFSET,
81 IDE1_SELECT_REG_OFFSET,
82 IDE1_STATUS_REG_OFFSET,
83 IDE1_CONTROL_REG_OFFSET,
84 IDE1_IRQ_REG_OFFSET,
85 },
86 IDE1_INTERRUPT,
87 },
88#endif /* IDE1_BASE_OFFSET */
89#endif /* IDE0_BASE_OFFSET */
90};
91
92ide_pio_timings_t ide_pio_clocks[6];
93int hold_time[6] = {30, 20, 15, 10, 10, 10 }; /* PIO Mode 5 with IORDY (nonstandard) */
94
95/*
96 * Warning: only 1 (ONE) PCMCIA slot supported here,
97 * which must be correctly initialized by the firmware (PPCBoot).
98 */
99static int _slot_ = -1; /* will be read from PCMCIA registers */
100
101/* Make clock cycles and always round up */
102#define PCMCIA_MK_CLKS( t, T ) (( (t) * ((T)/1000000) + 999U ) / 1000U )
103
104#define M8XX_PCMCIA_CD2(slot) (0x10000000 >> (slot << 4))
105#define M8XX_PCMCIA_CD1(slot) (0x08000000 >> (slot << 4))
106
107/*
108 * The TQM850L hardware has two pins swapped! Grrrrgh!
109 */
110#ifdef CONFIG_TQM850L
111#define __MY_PCMCIA_GCRX_CXRESET PCMCIA_GCRX_CXOE
112#define __MY_PCMCIA_GCRX_CXOE PCMCIA_GCRX_CXRESET
113#else
114#define __MY_PCMCIA_GCRX_CXRESET PCMCIA_GCRX_CXRESET
115#define __MY_PCMCIA_GCRX_CXOE PCMCIA_GCRX_CXOE
116#endif
117
118#if defined(CONFIG_BLK_DEV_MPC8xx_IDE) && defined(CONFIG_IDE_8xx_PCCARD)
119#define PCMCIA_SCHLVL IDE0_INTERRUPT /* Status Change Interrupt Level */
120static int pcmcia_schlvl = PCMCIA_SCHLVL;
121#endif
122
123/*
124 * See include/linux/ide.h for definition of hw_regs_t (p, base)
125 */
126
127/*
128 * m8xx_ide_init_ports() for a direct IDE interface _using_
129 * MPC8xx's internal PCMCIA interface
130 */
131#if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT)
132static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
133{
134 unsigned long *p = hw->io_ports_array;
135 int i;
136
137 typedef struct {
138 ulong br;
139 ulong or;
140 } pcmcia_win_t;
141 volatile pcmcia_win_t *win;
142 volatile pcmconf8xx_t *pcmp;
143
144 uint *pgcrx;
145 u32 pcmcia_phy_base;
146 u32 pcmcia_phy_end;
147 static unsigned long pcmcia_base = 0;
148 unsigned long base;
149
150 *p = 0;
151
152 pcmp = (pcmconf8xx_t *)(&(((immap_t *)IMAP_ADDR)->im_pcmcia));
153
154 if (!pcmcia_base) {
155 /*
156 * Read out PCMCIA registers. Since the reset values
157 * are undefined, we sure hope that they have been
158 * set up by firmware
159 */
160
161 /* Scan all registers for valid settings */
162 pcmcia_phy_base = 0xFFFFFFFF;
163 pcmcia_phy_end = 0;
164 /* br0 is start of brX and orX regs */
165 win = (pcmcia_win_t *) \
166 (&(((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0));
167 for (i = 0; i < 8; i++) {
168 if (win->or & 1) { /* This bank is marked as valid */
169 if (win->br < pcmcia_phy_base) {
170 pcmcia_phy_base = win->br;
171 }
172 if ((win->br + PCMCIA_MEM_SIZE) > pcmcia_phy_end) {
173 pcmcia_phy_end = win->br + PCMCIA_MEM_SIZE;
174 }
175 /* Check which slot that has been defined */
176 _slot_ = (win->or >> 2) & 1;
177
178 } /* Valid bank */
179 win++;
180 } /* for */
181
182 printk ("PCMCIA slot %c: phys mem %08x...%08x (size %08x)\n",
183 'A' + _slot_,
184 pcmcia_phy_base, pcmcia_phy_end,
185 pcmcia_phy_end - pcmcia_phy_base);
186
187 if (!request_mem_region(pcmcia_phy_base,
188 pcmcia_phy_end - pcmcia_phy_base,
189 DRV_NAME)) {
190 printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
191 return -EBUSY;
192 }
193
194 pcmcia_base=(unsigned long)ioremap(pcmcia_phy_base,
195 pcmcia_phy_end-pcmcia_phy_base);
196
197#ifdef DEBUG
198 printk ("PCMCIA virt base: %08lx\n", pcmcia_base);
199#endif
200 /* Compute clock cycles for PIO timings */
201 for (i=0; i<6; ++i) {
202 bd_t *binfo = (bd_t *)__res;
203
204 hold_time[i] =
205 PCMCIA_MK_CLKS (hold_time[i],
206 binfo->bi_busfreq);
207 ide_pio_clocks[i].setup_time =
208 PCMCIA_MK_CLKS (ide_pio_timings[i].setup_time,
209 binfo->bi_busfreq);
210 ide_pio_clocks[i].active_time =
211 PCMCIA_MK_CLKS (ide_pio_timings[i].active_time,
212 binfo->bi_busfreq);
213 ide_pio_clocks[i].cycle_time =
214 PCMCIA_MK_CLKS (ide_pio_timings[i].cycle_time,
215 binfo->bi_busfreq);
216#if 0
217 printk ("PIO mode %d timings: %d/%d/%d => %d/%d/%d\n",
218 i,
219 ide_pio_clocks[i].setup_time,
220 ide_pio_clocks[i].active_time,
221 ide_pio_clocks[i].hold_time,
222 ide_pio_clocks[i].cycle_time,
223 ide_pio_timings[i].setup_time,
224 ide_pio_timings[i].active_time,
225 ide_pio_timings[i].hold_time,
226 ide_pio_timings[i].cycle_time);
227#endif
228 }
229 }
230
231 if (_slot_ == -1) {
232 printk ("PCMCIA slot has not been defined! Using A as default\n");
233 _slot_ = 0;
234 }
235
236#ifdef CONFIG_IDE_8xx_PCCARD
237
238#ifdef DEBUG
239 printk ("PIPR = 0x%08X slot %c ==> mask = 0x%X\n",
240 pcmp->pcmc_pipr,
241 'A' + _slot_,
242 M8XX_PCMCIA_CD1(_slot_) | M8XX_PCMCIA_CD2(_slot_) );
243#endif /* DEBUG */
244
245 if (pcmp->pcmc_pipr & (M8XX_PCMCIA_CD1(_slot_)|M8XX_PCMCIA_CD2(_slot_))) {
246 printk ("No card in slot %c: PIPR=%08x\n",
247 'A' + _slot_, (u32) pcmp->pcmc_pipr);
248 return -ENODEV; /* No card in slot */
249 }
250
251 check_ide_device (pcmcia_base);
252
253#endif /* CONFIG_IDE_8xx_PCCARD */
254
255 base = pcmcia_base + ioport_dsc[data_port].base_off;
256#ifdef DEBUG
257 printk ("base: %08x + %08x = %08x\n",
258 pcmcia_base, ioport_dsc[data_port].base_off, base);
259#endif
260
261 for (i = 0; i < IDE_NR_PORTS; ++i) {
262#ifdef DEBUG
263 printk ("port[%d]: %08x + %08x = %08x\n",
264 i,
265 base,
266 ioport_dsc[data_port].reg_off[i],
267 i, base + ioport_dsc[data_port].reg_off[i]);
268#endif
269 *p++ = base + ioport_dsc[data_port].reg_off[i];
270 }
271
272 hw->irq = ioport_dsc[data_port].irq;
273 hw->ack_intr = (ide_ack_intr_t *)ide_interrupt_ack;
274
275#ifdef CONFIG_IDE_8xx_PCCARD
276 {
277 unsigned int reg;
278
279 if (_slot_)
280 pgcrx = &((immap_t *) IMAP_ADDR)->im_pcmcia.pcmc_pgcrb;
281 else
282 pgcrx = &((immap_t *) IMAP_ADDR)->im_pcmcia.pcmc_pgcra;
283
284 reg = *pgcrx;
285 reg |= mk_int_int_mask (pcmcia_schlvl) << 24;
286 reg |= mk_int_int_mask (pcmcia_schlvl) << 16;
287 *pgcrx = reg;
288 }
289#endif /* CONFIG_IDE_8xx_PCCARD */
290
291 /* Enable Harddisk Interrupt,
292 * and make it edge sensitive
293 */
294 /* (11-18) Set edge detect for irq, no wakeup from low power mode */
295 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_siel |=
296 (0x80000000 >> ioport_dsc[data_port].irq);
297
298#ifdef CONFIG_IDE_8xx_PCCARD
299 /* Make sure we don't get garbage irq */
300 ((immap_t *) IMAP_ADDR)->im_pcmcia.pcmc_pscr = 0xFFFF;
301
302 /* Enable falling edge irq */
303 pcmp->pcmc_per = 0x100000 >> (16 * _slot_);
304#endif /* CONFIG_IDE_8xx_PCCARD */
305
306 hw->chipset = ide_generic;
307
308 return 0;
309}
310#endif /* CONFIG_IDE_8xx_PCCARD || CONFIG_IDE_8xx_DIRECT */
311
312/*
313 * m8xx_ide_init_ports() for a direct IDE interface _not_ using
314 * MPC8xx's internal PCMCIA interface
315 */
316#if defined(CONFIG_IDE_EXT_DIRECT)
317static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
318{
319 unsigned long *p = hw->io_ports_array;
320 int i;
321
322 u32 ide_phy_base;
323 u32 ide_phy_end;
324 static unsigned long ide_base = 0;
325 unsigned long base;
326
327 *p = 0;
328
329 if (!ide_base) {
330
331 /* TODO:
332 * - add code to read ORx, BRx
333 */
334 ide_phy_base = CFG_ATA_BASE_ADDR;
335 ide_phy_end = CFG_ATA_BASE_ADDR + 0x200;
336
337 printk ("IDE phys mem : %08x...%08x (size %08x)\n",
338 ide_phy_base, ide_phy_end,
339 ide_phy_end - ide_phy_base);
340
341 if (!request_mem_region(ide_phy_base, 0x200, DRV_NAME)) {
342 printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
343 return -EBUSY;
344 }
345
346 ide_base=(unsigned long)ioremap(ide_phy_base,
347 ide_phy_end-ide_phy_base);
348
349#ifdef DEBUG
350 printk ("IDE virt base: %08lx\n", ide_base);
351#endif
352 }
353
354 base = ide_base + ioport_dsc[data_port].base_off;
355#ifdef DEBUG
356 printk ("base: %08x + %08x = %08x\n",
357 ide_base, ioport_dsc[data_port].base_off, base);
358#endif
359
360 for (i = 0; i < IDE_NR_PORTS; ++i) {
361#ifdef DEBUG
362 printk ("port[%d]: %08x + %08x = %08x\n",
363 i,
364 base,
365 ioport_dsc[data_port].reg_off[i],
366 i, base + ioport_dsc[data_port].reg_off[i]);
367#endif
368 *p++ = base + ioport_dsc[data_port].reg_off[i];
369 }
370
371 /* direct connected IDE drive, i.e. external IRQ */
372 hw->irq = ioport_dsc[data_port].irq;
373 hw->ack_intr = (ide_ack_intr_t *)ide_interrupt_ack;
374
375 /* Enable Harddisk Interrupt,
376 * and make it edge sensitive
377 */
378 /* (11-18) Set edge detect for irq, no wakeup from low power mode */
379 ((immap_t *) IMAP_ADDR)->im_siu_conf.sc_siel |=
380 (0x80000000 >> ioport_dsc[data_port].irq);
381
382 hw->chipset = ide_generic;
383
384 return 0;
385}
386#endif /* CONFIG_IDE_8xx_DIRECT */
387
388
389/* -------------------------------------------------------------------- */
390
391
392/* PCMCIA Timing */
393#ifndef PCMCIA_SHT
394#define PCMCIA_SHT(t) ((t & 0x0F)<<16) /* Strobe Hold Time */
395#define PCMCIA_SST(t) ((t & 0x0F)<<12) /* Strobe Setup Time */
396#define PCMCIA_SL(t) ((t==32) ? 0 : ((t & 0x1F)<<7)) /* Strobe Length */
397#endif
398
399/* Calculate PIO timings */
400static void m8xx_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
401{
402#if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT)
403 volatile pcmconf8xx_t *pcmp;
404 ulong timing, mask, reg;
405
406 pcmp = (pcmconf8xx_t *)(&(((immap_t *)IMAP_ADDR)->im_pcmcia));
407
408 mask = ~(PCMCIA_SHT(0xFF) | PCMCIA_SST(0xFF) | PCMCIA_SL(0xFF));
409
410 timing = PCMCIA_SHT(hold_time[pio] )
411 | PCMCIA_SST(ide_pio_clocks[pio].setup_time )
412 | PCMCIA_SL (ide_pio_clocks[pio].active_time)
413 ;
414
415#if 1
416 printk ("Setting timing bits 0x%08lx in PCMCIA controller\n", timing);
417#endif
418 if ((reg = pcmp->pcmc_por0 & mask) != 0)
419 pcmp->pcmc_por0 = reg | timing;
420
421 if ((reg = pcmp->pcmc_por1 & mask) != 0)
422 pcmp->pcmc_por1 = reg | timing;
423
424 if ((reg = pcmp->pcmc_por2 & mask) != 0)
425 pcmp->pcmc_por2 = reg | timing;
426
427 if ((reg = pcmp->pcmc_por3 & mask) != 0)
428 pcmp->pcmc_por3 = reg | timing;
429
430 if ((reg = pcmp->pcmc_por4 & mask) != 0)
431 pcmp->pcmc_por4 = reg | timing;
432
433 if ((reg = pcmp->pcmc_por5 & mask) != 0)
434 pcmp->pcmc_por5 = reg | timing;
435
436 if ((reg = pcmp->pcmc_por6 & mask) != 0)
437 pcmp->pcmc_por6 = reg | timing;
438
439 if ((reg = pcmp->pcmc_por7 & mask) != 0)
440 pcmp->pcmc_por7 = reg | timing;
441
442#elif defined(CONFIG_IDE_EXT_DIRECT)
443
444 printk("%s[%d] %s: not implemented yet!\n",
445 __FILE__, __LINE__, __func__);
446#endif /* defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_PCMCIA */
447}
448
449static const struct ide_port_ops m8xx_port_ops = {
450 .set_pio_mode = m8xx_ide_set_pio_mode,
451};
452
453static void
454ide_interrupt_ack (void *dev)
455{
456#ifdef CONFIG_IDE_8xx_PCCARD
457 u_int pscr, pipr;
458
459#if (PCMCIA_SOCKETS_NO == 2)
460 u_int _slot_;
461#endif
462
463 /* get interrupt sources */
464
465 pscr = ((volatile immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr;
466 pipr = ((volatile immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pipr;
467
468 /*
469 * report only if both card detect signals are the same
470 * not too nice done,
471 * we depend on that CD2 is the bit to the left of CD1...
472 */
473
474 if(_slot_==-1){
475 printk("PCMCIA slot has not been defined! Using A as default\n");
476 _slot_=0;
477 }
478
479 if(((pipr & M8XX_PCMCIA_CD2(_slot_)) >> 1) ^
480 (pipr & M8XX_PCMCIA_CD1(_slot_)) ) {
481 printk ("card detect interrupt\n");
482 }
483 /* clear the interrupt sources */
484 ((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr = pscr;
485
486#else /* ! CONFIG_IDE_8xx_PCCARD */
487 /*
488 * Only CONFIG_IDE_8xx_PCCARD is using the interrupt of the
489 * MPC8xx's PCMCIA controller, so there is nothing to be done here
490 * for CONFIG_IDE_8xx_DIRECT and CONFIG_IDE_EXT_DIRECT.
491 * The interrupt is handled somewhere else. -- Steven
492 */
493#endif /* CONFIG_IDE_8xx_PCCARD */
494}
495
496
497
498/*
499 * CIS Tupel codes
500 */
501#define CISTPL_NULL 0x00
502#define CISTPL_DEVICE 0x01
503#define CISTPL_LONGLINK_CB 0x02
504#define CISTPL_INDIRECT 0x03
505#define CISTPL_CONFIG_CB 0x04
506#define CISTPL_CFTABLE_ENTRY_CB 0x05
507#define CISTPL_LONGLINK_MFC 0x06
508#define CISTPL_BAR 0x07
509#define CISTPL_PWR_MGMNT 0x08
510#define CISTPL_EXTDEVICE 0x09
511#define CISTPL_CHECKSUM 0x10
512#define CISTPL_LONGLINK_A 0x11
513#define CISTPL_LONGLINK_C 0x12
514#define CISTPL_LINKTARGET 0x13
515#define CISTPL_NO_LINK 0x14
516#define CISTPL_VERS_1 0x15
517#define CISTPL_ALTSTR 0x16
518#define CISTPL_DEVICE_A 0x17
519#define CISTPL_JEDEC_C 0x18
520#define CISTPL_JEDEC_A 0x19
521#define CISTPL_CONFIG 0x1a
522#define CISTPL_CFTABLE_ENTRY 0x1b
523#define CISTPL_DEVICE_OC 0x1c
524#define CISTPL_DEVICE_OA 0x1d
525#define CISTPL_DEVICE_GEO 0x1e
526#define CISTPL_DEVICE_GEO_A 0x1f
527#define CISTPL_MANFID 0x20
528#define CISTPL_FUNCID 0x21
529#define CISTPL_FUNCE 0x22
530#define CISTPL_SWIL 0x23
531#define CISTPL_END 0xff
532
533/*
534 * CIS Function ID codes
535 */
536#define CISTPL_FUNCID_MULTI 0x00
537#define CISTPL_FUNCID_MEMORY 0x01
538#define CISTPL_FUNCID_SERIAL 0x02
539#define CISTPL_FUNCID_PARALLEL 0x03
540#define CISTPL_FUNCID_FIXED 0x04
541#define CISTPL_FUNCID_VIDEO 0x05
542#define CISTPL_FUNCID_NETWORK 0x06
543#define CISTPL_FUNCID_AIMS 0x07
544#define CISTPL_FUNCID_SCSI 0x08
545
546/*
547 * Fixed Disk FUNCE codes
548 */
549#define CISTPL_IDE_INTERFACE 0x01
550
551#define CISTPL_FUNCE_IDE_IFACE 0x01
552#define CISTPL_FUNCE_IDE_MASTER 0x02
553#define CISTPL_FUNCE_IDE_SLAVE 0x03
554
555/* First feature byte */
556#define CISTPL_IDE_SILICON 0x04
557#define CISTPL_IDE_UNIQUE 0x08
558#define CISTPL_IDE_DUAL 0x10
559
560/* Second feature byte */
561#define CISTPL_IDE_HAS_SLEEP 0x01
562#define CISTPL_IDE_HAS_STANDBY 0x02
563#define CISTPL_IDE_HAS_IDLE 0x04
564#define CISTPL_IDE_LOW_POWER 0x08
565#define CISTPL_IDE_REG_INHIBIT 0x10
566#define CISTPL_IDE_HAS_INDEX 0x20
567#define CISTPL_IDE_IOIS16 0x40
568
569
570/* -------------------------------------------------------------------- */
571
572
573#define MAX_TUPEL_SZ 512
574#define MAX_FEATURES 4
575
576static int check_ide_device (unsigned long base)
577{
578 volatile u8 *ident = NULL;
579 volatile u8 *feature_p[MAX_FEATURES];
580 volatile u8 *p, *start;
581 int n_features = 0;
582 u8 func_id = ~0;
583 u8 code, len;
584 unsigned short config_base = 0;
585 int found = 0;
586 int i;
587
588#ifdef DEBUG
589 printk ("PCMCIA MEM: %08lX\n", base);
590#endif
591 start = p = (volatile u8 *) base;
592
593 while ((p - start) < MAX_TUPEL_SZ) {
594
595 code = *p; p += 2;
596
597 if (code == 0xFF) { /* End of chain */
598 break;
599 }
600
601 len = *p; p += 2;
602#ifdef DEBUG_PCMCIA
603 { volatile u8 *q = p;
604 printk ("\nTuple code %02x length %d\n\tData:",
605 code, len);
606
607 for (i = 0; i < len; ++i) {
608 printk (" %02x", *q);
609 q+= 2;
610 }
611 }
612#endif /* DEBUG_PCMCIA */
613 switch (code) {
614 case CISTPL_VERS_1:
615 ident = p + 4;
616 break;
617 case CISTPL_FUNCID:
618 func_id = *p;
619 break;
620 case CISTPL_FUNCE:
621 if (n_features < MAX_FEATURES)
622 feature_p[n_features++] = p;
623 break;
624 case CISTPL_CONFIG:
625 config_base = (*(p+6) << 8) + (*(p+4));
626 default:
627 break;
628 }
629 p += 2 * len;
630 }
631
632 found = identify (ident);
633
634 if (func_id != ((u8)~0)) {
635 print_funcid (func_id);
636
637 if (func_id == CISTPL_FUNCID_FIXED)
638 found = 1;
639 else
640 return (1); /* no disk drive */
641 }
642
643 for (i=0; i<n_features; ++i) {
644 print_fixed (feature_p[i]);
645 }
646
647 if (!found) {
648 printk ("unknown card type\n");
649 return (1);
650 }
651
652 /* set level mode irq and I/O mapped device in config reg*/
653 *((u8 *)(base + config_base)) = 0x41;
654
655 return (0);
656}
657
658/* ------------------------------------------------------------------------- */
659
660static void print_funcid (int func)
661{
662 switch (func) {
663 case CISTPL_FUNCID_MULTI:
664 printk (" Multi-Function");
665 break;
666 case CISTPL_FUNCID_MEMORY:
667 printk (" Memory");
668 break;
669 case CISTPL_FUNCID_SERIAL:
670 printk (" Serial Port");
671 break;
672 case CISTPL_FUNCID_PARALLEL:
673 printk (" Parallel Port");
674 break;
675 case CISTPL_FUNCID_FIXED:
676 printk (" Fixed Disk");
677 break;
678 case CISTPL_FUNCID_VIDEO:
679 printk (" Video Adapter");
680 break;
681 case CISTPL_FUNCID_NETWORK:
682 printk (" Network Adapter");
683 break;
684 case CISTPL_FUNCID_AIMS:
685 printk (" AIMS Card");
686 break;
687 case CISTPL_FUNCID_SCSI:
688 printk (" SCSI Adapter");
689 break;
690 default:
691 printk (" Unknown");
692 break;
693 }
694 printk (" Card\n");
695}
696
697/* ------------------------------------------------------------------------- */
698
699static void print_fixed (volatile u8 *p)
700{
701 if (p == NULL)
702 return;
703
704 switch (*p) {
705 case CISTPL_FUNCE_IDE_IFACE:
706 { u8 iface = *(p+2);
707
708 printk ((iface == CISTPL_IDE_INTERFACE) ? " IDE" : " unknown");
709 printk (" interface ");
710 break;
711 }
712 case CISTPL_FUNCE_IDE_MASTER:
713 case CISTPL_FUNCE_IDE_SLAVE:
714 { u8 f1 = *(p+2);
715 u8 f2 = *(p+4);
716
717 printk ((f1 & CISTPL_IDE_SILICON) ? " [silicon]" : " [rotating]");
718
719 if (f1 & CISTPL_IDE_UNIQUE)
720 printk (" [unique]");
721
722 printk ((f1 & CISTPL_IDE_DUAL) ? " [dual]" : " [single]");
723
724 if (f2 & CISTPL_IDE_HAS_SLEEP)
725 printk (" [sleep]");
726
727 if (f2 & CISTPL_IDE_HAS_STANDBY)
728 printk (" [standby]");
729
730 if (f2 & CISTPL_IDE_HAS_IDLE)
731 printk (" [idle]");
732
733 if (f2 & CISTPL_IDE_LOW_POWER)
734 printk (" [low power]");
735
736 if (f2 & CISTPL_IDE_REG_INHIBIT)
737 printk (" [reg inhibit]");
738
739 if (f2 & CISTPL_IDE_HAS_INDEX)
740 printk (" [index]");
741
742 if (f2 & CISTPL_IDE_IOIS16)
743 printk (" [IOis16]");
744
745 break;
746 }
747 }
748 printk ("\n");
749}
750
751/* ------------------------------------------------------------------------- */
752
753
754#define MAX_IDENT_CHARS 64
755#define MAX_IDENT_FIELDS 4
756
757static u8 *known_cards[] = {
758 "ARGOSY PnPIDE D5",
759 NULL
760};
761
762static int identify (volatile u8 *p)
763{
764 u8 id_str[MAX_IDENT_CHARS];
765 u8 data;
766 u8 *t;
767 u8 **card;
768 int i, done;
769
770 if (p == NULL)
771 return (0); /* Don't know */
772
773 t = id_str;
774 done =0;
775
776 for (i=0; i<=4 && !done; ++i, p+=2) {
777 while ((data = *p) != '\0') {
778 if (data == 0xFF) {
779 done = 1;
780 break;
781 }
782 *t++ = data;
783 if (t == &id_str[MAX_IDENT_CHARS-1]) {
784 done = 1;
785 break;
786 }
787 p += 2;
788 }
789 if (!done)
790 *t++ = ' ';
791 }
792 *t = '\0';
793 while (--t > id_str) {
794 if (*t == ' ')
795 *t = '\0';
796 else
797 break;
798 }
799 printk ("Card ID: %s\n", id_str);
800
801 for (card=known_cards; *card; ++card) {
802 if (strcmp(*card, id_str) == 0) { /* found! */
803 return (1);
804 }
805 }
806
807 return (0); /* don't know */
808}
809
810static int __init mpc8xx_ide_probe(void)
811{
812 hw_regs_t hw;
813 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
814
815#ifdef IDE0_BASE_OFFSET
816 memset(&hw, 0, sizeof(hw));
817 if (!m8xx_ide_init_ports(&hw, 0)) {
818 ide_hwif_t *hwif = ide_find_port();
819
820 if (hwif) {
821 ide_init_port_hw(hwif, &hw);
822 hwif->pio_mask = ATA_PIO4;
823 hwif->port_ops = &m8xx_port_ops;
824
825 idx[0] = hwif->index;
826 }
827 }
828#ifdef IDE1_BASE_OFFSET
829 memset(&hw, 0, sizeof(hw));
830 if (!m8xx_ide_init_ports(&hw, 1)) {
831 ide_hwif_t *mate = ide_find_port();
832
833 if (mate) {
834 ide_init_port_hw(mate, &hw);
835 mate->pio_mask = ATA_PIO4;
836 mate->port_ops = &m8xx_port_ops;
837
838 idx[1] = mate->index;
839 }
840 }
841#endif
842#endif
843
844 ide_device_add(idx, NULL);
845
846 return 0;
847}
848
849module_init(mpc8xx_ide_probe);
850
851MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index dcb2c466bb97..93fb9067c043 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -5,7 +5,7 @@
5 * for doing DMA. 5 * for doing DMA.
6 * 6 *
7 * Copyright (C) 1998-2003 Paul Mackerras & Ben. Herrenschmidt 7 * Copyright (C) 1998-2003 Paul Mackerras & Ben. Herrenschmidt
8 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz 8 * Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
@@ -48,8 +48,6 @@
48#include <asm/mediabay.h> 48#include <asm/mediabay.h>
49#endif 49#endif
50 50
51#include "../ide-timing.h"
52
53#undef IDE_PMAC_DEBUG 51#undef IDE_PMAC_DEBUG
54 52
55#define DMA_WAIT_TIMEOUT 50 53#define DMA_WAIT_TIMEOUT 50
@@ -495,6 +493,7 @@ static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port)
495static void 493static void
496pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) 494pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
497{ 495{
496 struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
498 u32 *timings, t; 497 u32 *timings, t;
499 unsigned accessTicks, recTicks; 498 unsigned accessTicks, recTicks;
500 unsigned accessTime, recTime; 499 unsigned accessTime, recTime;
@@ -526,10 +525,9 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
526 } 525 }
527 case controller_kl_ata4: 526 case controller_kl_ata4:
528 /* 66Mhz cell */ 527 /* 66Mhz cell */
529 recTime = cycle_time - ide_pio_timings[pio].active_time 528 recTime = cycle_time - tim->active - tim->setup;
530 - ide_pio_timings[pio].setup_time;
531 recTime = max(recTime, 150U); 529 recTime = max(recTime, 150U);
532 accessTime = ide_pio_timings[pio].active_time; 530 accessTime = tim->active;
533 accessTime = max(accessTime, 150U); 531 accessTime = max(accessTime, 150U);
534 accessTicks = SYSCLK_TICKS_66(accessTime); 532 accessTicks = SYSCLK_TICKS_66(accessTime);
535 accessTicks = min(accessTicks, 0x1fU); 533 accessTicks = min(accessTicks, 0x1fU);
@@ -542,10 +540,9 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
542 default: { 540 default: {
543 /* 33Mhz cell */ 541 /* 33Mhz cell */
544 int ebit = 0; 542 int ebit = 0;
545 recTime = cycle_time - ide_pio_timings[pio].active_time 543 recTime = cycle_time - tim->active - tim->setup;
546 - ide_pio_timings[pio].setup_time;
547 recTime = max(recTime, 150U); 544 recTime = max(recTime, 150U);
548 accessTime = ide_pio_timings[pio].active_time; 545 accessTime = tim->active;
549 accessTime = max(accessTime, 150U); 546 accessTime = max(accessTime, 150U);
550 accessTicks = SYSCLK_TICKS(accessTime); 547 accessTicks = SYSCLK_TICKS(accessTime);
551 accessTicks = min(accessTicks, 0x1fU); 548 accessTicks = min(accessTicks, 0x1fU);
@@ -1151,8 +1148,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1151 base = ioremap(macio_resource_start(mdev, 0), 0x400); 1148 base = ioremap(macio_resource_start(mdev, 0), 0x400);
1152 regbase = (unsigned long) base; 1149 regbase = (unsigned long) base;
1153 1150
1154 hwif->dev = &mdev->bus->pdev->dev;
1155
1156 pmif->mdev = mdev; 1151 pmif->mdev = mdev;
1157 pmif->node = mdev->ofdev.node; 1152 pmif->node = mdev->ofdev.node;
1158 pmif->regbase = regbase; 1153 pmif->regbase = regbase;
@@ -1174,7 +1169,8 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1174 memset(&hw, 0, sizeof(hw)); 1169 memset(&hw, 0, sizeof(hw));
1175 pmac_ide_init_ports(&hw, pmif->regbase); 1170 pmac_ide_init_ports(&hw, pmif->regbase);
1176 hw.irq = irq; 1171 hw.irq = irq;
1177 hw.dev = &mdev->ofdev.dev; 1172 hw.dev = &mdev->bus->pdev->dev;
1173 hw.parent = &mdev->ofdev.dev;
1178 1174
1179 rc = pmac_ide_setup_device(pmif, hwif, &hw); 1175 rc = pmac_ide_setup_device(pmif, hwif, &hw);
1180 if (rc != 0) { 1176 if (rc != 0) {
@@ -1274,7 +1270,6 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1274 goto out_free_pmif; 1270 goto out_free_pmif;
1275 } 1271 }
1276 1272
1277 hwif->dev = &pdev->dev;
1278 pmif->mdev = NULL; 1273 pmif->mdev = NULL;
1279 pmif->node = np; 1274 pmif->node = np;
1280 1275
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index abcfb1739d4d..65fc08b6b6d0 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -6,19 +6,15 @@
6 * May be copied or modified under the terms of the GNU General Public License 6 * May be copied or modified under the terms of the GNU General Public License
7 */ 7 */
8 8
9#include <linux/module.h>
10#include <linux/types.h> 9#include <linux/types.h>
11#include <linux/kernel.h> 10#include <linux/kernel.h>
12#include <linux/pci.h> 11#include <linux/pci.h>
13#include <linux/init.h> 12#include <linux/init.h>
14#include <linux/timer.h>
15#include <linux/mm.h>
16#include <linux/interrupt.h> 13#include <linux/interrupt.h>
17#include <linux/ide.h> 14#include <linux/ide.h>
18#include <linux/dma-mapping.h> 15#include <linux/dma-mapping.h>
19 16
20#include <asm/io.h> 17#include <asm/io.h>
21#include <asm/irq.h>
22 18
23/** 19/**
24 * ide_setup_pci_baseregs - place a PCI IDE controller native 20 * ide_setup_pci_baseregs - place a PCI IDE controller native
@@ -319,25 +315,22 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
319 315
320 ctl = pci_resource_start(dev, 2*port+1); 316 ctl = pci_resource_start(dev, 2*port+1);
321 base = pci_resource_start(dev, 2*port); 317 base = pci_resource_start(dev, 2*port);
322 if ((ctl && !base) || (base && !ctl)) { 318 } else {
323 printk(KERN_ERR "%s: inconsistent baseregs (BIOS) "
324 "for port %d, skipping\n", d->name, port);
325 return NULL;
326 }
327 }
328 if (!ctl) {
329 /* Use default values */ 319 /* Use default values */
330 ctl = port ? 0x374 : 0x3f4; 320 ctl = port ? 0x374 : 0x3f4;
331 base = port ? 0x170 : 0x1f0; 321 base = port ? 0x170 : 0x1f0;
332 } 322 }
333 323
334 hwif = ide_find_port_slot(d); 324 if (!base || !ctl) {
335 if (hwif == NULL) { 325 printk(KERN_ERR "%s: bad PCI BARs for port %d, skipping\n",
336 printk(KERN_ERR "%s: too many IDE interfaces, no room in " 326 d->name, port);
337 "table\n", d->name);
338 return NULL; 327 return NULL;
339 } 328 }
340 329
330 hwif = ide_find_port_slot(d);
331 if (hwif == NULL)
332 return NULL;
333
341 memset(&hw, 0, sizeof(hw)); 334 memset(&hw, 0, sizeof(hw));
342 hw.irq = irq; 335 hw.irq = irq;
343 hw.dev = &dev->dev; 336 hw.dev = &dev->dev;
@@ -346,8 +339,6 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
346 339
347 ide_init_port_hw(hwif, &hw); 340 ide_init_port_hw(hwif, &hw);
348 341
349 hwif->dev = &dev->dev;
350
351 return hwif; 342 return hwif;
352} 343}
353 344
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 356fac6d105a..5a1cf2580e16 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -71,6 +71,10 @@
71 71
72#include "iscsi_iser.h" 72#include "iscsi_iser.h"
73 73
74static struct scsi_host_template iscsi_iser_sht;
75static struct iscsi_transport iscsi_iser_transport;
76static struct scsi_transport_template *iscsi_iser_scsi_transport;
77
74static unsigned int iscsi_max_lun = 512; 78static unsigned int iscsi_max_lun = 512;
75module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); 79module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
76 80
@@ -91,7 +95,6 @@ iscsi_iser_recv(struct iscsi_conn *conn,
91 struct iscsi_hdr *hdr, char *rx_data, int rx_data_len) 95 struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
92{ 96{
93 int rc = 0; 97 int rc = 0;
94 uint32_t ret_itt;
95 int datalen; 98 int datalen;
96 int ahslen; 99 int ahslen;
97 100
@@ -107,12 +110,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
107 /* read AHS */ 110 /* read AHS */
108 ahslen = hdr->hlength * 4; 111 ahslen = hdr->hlength * 4;
109 112
110 /* verify itt (itt encoding: age+cid+itt) */ 113 rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
111 rc = iscsi_verify_itt(conn, hdr, &ret_itt);
112
113 if (!rc)
114 rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
115
116 if (rc && rc != ISCSI_ERR_NO_SCSI_CMD) 114 if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
117 goto error; 115 goto error;
118 116
@@ -123,25 +121,33 @@ error:
123 121
124 122
125/** 123/**
126 * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands 124 * iscsi_iser_task_init - Initialize task
125 * @task: iscsi task
127 * 126 *
128 **/ 127 * Initialize the task for the scsi command or mgmt command.
128 */
129static int 129static int
130iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask) 130iscsi_iser_task_init(struct iscsi_task *task)
131{ 131{
132 struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data; 132 struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
133 struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; 133 struct iscsi_iser_task *iser_task = task->dd_data;
134
135 /* mgmt task */
136 if (!task->sc) {
137 iser_task->desc.data = task->data;
138 return 0;
139 }
134 140
135 iser_ctask->command_sent = 0; 141 iser_task->command_sent = 0;
136 iser_ctask->iser_conn = iser_conn; 142 iser_task->iser_conn = iser_conn;
137 iser_ctask_rdma_init(iser_ctask); 143 iser_task_rdma_init(iser_task);
138 return 0; 144 return 0;
139} 145}
140 146
141/** 147/**
142 * iscsi_mtask_xmit - xmit management(immediate) task 148 * iscsi_iser_mtask_xmit - xmit management(immediate) task
143 * @conn: iscsi connection 149 * @conn: iscsi connection
144 * @mtask: task management task 150 * @task: task management task
145 * 151 *
146 * Notes: 152 * Notes:
147 * The function can return -EAGAIN in which case caller must 153 * The function can return -EAGAIN in which case caller must
@@ -150,20 +156,19 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
150 * 156 *
151 **/ 157 **/
152static int 158static int
153iscsi_iser_mtask_xmit(struct iscsi_conn *conn, 159iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
154 struct iscsi_mgmt_task *mtask)
155{ 160{
156 int error = 0; 161 int error = 0;
157 162
158 debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt); 163 debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
159 164
160 error = iser_send_control(conn, mtask); 165 error = iser_send_control(conn, task);
161 166
162 /* since iser xmits control with zero copy, mtasks can not be recycled 167 /* since iser xmits control with zero copy, tasks can not be recycled
163 * right after sending them. 168 * right after sending them.
164 * The recycling scheme is based on whether a response is expected 169 * The recycling scheme is based on whether a response is expected
165 * - if yes, the mtask is recycled at iscsi_complete_pdu 170 * - if yes, the task is recycled at iscsi_complete_pdu
166 * - if no, the mtask is recycled at iser_snd_completion 171 * - if no, the task is recycled at iser_snd_completion
167 */ 172 */
168 if (error && error != -ENOBUFS) 173 if (error && error != -ENOBUFS)
169 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 174 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
@@ -172,97 +177,86 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
172} 177}
173 178
174static int 179static int
175iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn, 180iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
176 struct iscsi_cmd_task *ctask) 181 struct iscsi_task *task)
177{ 182{
178 struct iscsi_data hdr; 183 struct iscsi_data hdr;
179 int error = 0; 184 int error = 0;
180 185
181 /* Send data-out PDUs while there's still unsolicited data to send */ 186 /* Send data-out PDUs while there's still unsolicited data to send */
182 while (ctask->unsol_count > 0) { 187 while (task->unsol_count > 0) {
183 iscsi_prep_unsolicit_data_pdu(ctask, &hdr); 188 iscsi_prep_unsolicit_data_pdu(task, &hdr);
184 debug_scsi("Sending data-out: itt 0x%x, data count %d\n", 189 debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
185 hdr.itt, ctask->data_count); 190 hdr.itt, task->data_count);
186 191
187 /* the buffer description has been passed with the command */ 192 /* the buffer description has been passed with the command */
188 /* Send the command */ 193 /* Send the command */
189 error = iser_send_data_out(conn, ctask, &hdr); 194 error = iser_send_data_out(conn, task, &hdr);
190 if (error) { 195 if (error) {
191 ctask->unsol_datasn--; 196 task->unsol_datasn--;
192 goto iscsi_iser_ctask_xmit_unsol_data_exit; 197 goto iscsi_iser_task_xmit_unsol_data_exit;
193 } 198 }
194 ctask->unsol_count -= ctask->data_count; 199 task->unsol_count -= task->data_count;
195 debug_scsi("Need to send %d more as data-out PDUs\n", 200 debug_scsi("Need to send %d more as data-out PDUs\n",
196 ctask->unsol_count); 201 task->unsol_count);
197 } 202 }
198 203
199iscsi_iser_ctask_xmit_unsol_data_exit: 204iscsi_iser_task_xmit_unsol_data_exit:
200 return error; 205 return error;
201} 206}
202 207
203static int 208static int
204iscsi_iser_ctask_xmit(struct iscsi_conn *conn, 209iscsi_iser_task_xmit(struct iscsi_task *task)
205 struct iscsi_cmd_task *ctask)
206{ 210{
207 struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; 211 struct iscsi_conn *conn = task->conn;
212 struct iscsi_iser_task *iser_task = task->dd_data;
208 int error = 0; 213 int error = 0;
209 214
210 if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) { 215 if (!task->sc)
211 BUG_ON(scsi_bufflen(ctask->sc) == 0); 216 return iscsi_iser_mtask_xmit(conn, task);
217
218 if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
219 BUG_ON(scsi_bufflen(task->sc) == 0);
212 220
213 debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n", 221 debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
214 ctask->itt, scsi_bufflen(ctask->sc), 222 task->itt, scsi_bufflen(task->sc),
215 ctask->imm_count, ctask->unsol_count); 223 task->imm_count, task->unsol_count);
216 } 224 }
217 225
218 debug_scsi("ctask deq [cid %d itt 0x%x]\n", 226 debug_scsi("task deq [cid %d itt 0x%x]\n",
219 conn->id, ctask->itt); 227 conn->id, task->itt);
220 228
221 /* Send the cmd PDU */ 229 /* Send the cmd PDU */
222 if (!iser_ctask->command_sent) { 230 if (!iser_task->command_sent) {
223 error = iser_send_command(conn, ctask); 231 error = iser_send_command(conn, task);
224 if (error) 232 if (error)
225 goto iscsi_iser_ctask_xmit_exit; 233 goto iscsi_iser_task_xmit_exit;
226 iser_ctask->command_sent = 1; 234 iser_task->command_sent = 1;
227 } 235 }
228 236
229 /* Send unsolicited data-out PDU(s) if necessary */ 237 /* Send unsolicited data-out PDU(s) if necessary */
230 if (ctask->unsol_count) 238 if (task->unsol_count)
231 error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask); 239 error = iscsi_iser_task_xmit_unsol_data(conn, task);
232 240
233 iscsi_iser_ctask_xmit_exit: 241 iscsi_iser_task_xmit_exit:
234 if (error && error != -ENOBUFS) 242 if (error && error != -ENOBUFS)
235 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 243 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
236 return error; 244 return error;
237} 245}
238 246
239static void 247static void
240iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 248iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
241{ 249{
242 struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; 250 struct iscsi_iser_task *iser_task = task->dd_data;
243 251
244 if (iser_ctask->status == ISER_TASK_STATUS_STARTED) { 252 /* mgmt tasks do not need special cleanup */
245 iser_ctask->status = ISER_TASK_STATUS_COMPLETED; 253 if (!task->sc)
246 iser_ctask_rdma_finalize(iser_ctask); 254 return;
247 }
248}
249
250static struct iser_conn *
251iscsi_iser_ib_conn_lookup(__u64 ep_handle)
252{
253 struct iser_conn *ib_conn;
254 struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
255 255
256 mutex_lock(&ig.connlist_mutex); 256 if (iser_task->status == ISER_TASK_STATUS_STARTED) {
257 list_for_each_entry(ib_conn, &ig.connlist, conn_list) { 257 iser_task->status = ISER_TASK_STATUS_COMPLETED;
258 if (ib_conn == uib_conn) { 258 iser_task_rdma_finalize(iser_task);
259 mutex_unlock(&ig.connlist_mutex);
260 return ib_conn;
261 }
262 } 259 }
263 mutex_unlock(&ig.connlist_mutex);
264 iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
265 return NULL;
266} 260}
267 261
268static struct iscsi_cls_conn * 262static struct iscsi_cls_conn *
@@ -272,7 +266,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
272 struct iscsi_cls_conn *cls_conn; 266 struct iscsi_cls_conn *cls_conn;
273 struct iscsi_iser_conn *iser_conn; 267 struct iscsi_iser_conn *iser_conn;
274 268
275 cls_conn = iscsi_conn_setup(cls_session, conn_idx); 269 cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
276 if (!cls_conn) 270 if (!cls_conn)
277 return NULL; 271 return NULL;
278 conn = cls_conn->dd_data; 272 conn = cls_conn->dd_data;
@@ -283,21 +277,11 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
283 */ 277 */
284 conn->max_recv_dlength = 128; 278 conn->max_recv_dlength = 128;
285 279
286 iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL); 280 iser_conn = conn->dd_data;
287 if (!iser_conn)
288 goto conn_alloc_fail;
289
290 /* currently this is the only field which need to be initiated */
291 rwlock_init(&iser_conn->lock);
292
293 conn->dd_data = iser_conn; 281 conn->dd_data = iser_conn;
294 iser_conn->iscsi_conn = conn; 282 iser_conn->iscsi_conn = conn;
295 283
296 return cls_conn; 284 return cls_conn;
297
298conn_alloc_fail:
299 iscsi_conn_teardown(cls_conn);
300 return NULL;
301} 285}
302 286
303static void 287static void
@@ -305,11 +289,18 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
305{ 289{
306 struct iscsi_conn *conn = cls_conn->dd_data; 290 struct iscsi_conn *conn = cls_conn->dd_data;
307 struct iscsi_iser_conn *iser_conn = conn->dd_data; 291 struct iscsi_iser_conn *iser_conn = conn->dd_data;
292 struct iser_conn *ib_conn = iser_conn->ib_conn;
308 293
309 iscsi_conn_teardown(cls_conn); 294 iscsi_conn_teardown(cls_conn);
310 if (iser_conn->ib_conn) 295 /*
311 iser_conn->ib_conn->iser_conn = NULL; 296 * Userspace will normally call the stop callback and
312 kfree(iser_conn); 297 * already have freed the ib_conn, but if it goofed up then
298 * we free it here.
299 */
300 if (ib_conn) {
301 ib_conn->iser_conn = NULL;
302 iser_conn_put(ib_conn);
303 }
313} 304}
314 305
315static int 306static int
@@ -320,6 +311,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
320 struct iscsi_conn *conn = cls_conn->dd_data; 311 struct iscsi_conn *conn = cls_conn->dd_data;
321 struct iscsi_iser_conn *iser_conn; 312 struct iscsi_iser_conn *iser_conn;
322 struct iser_conn *ib_conn; 313 struct iser_conn *ib_conn;
314 struct iscsi_endpoint *ep;
323 int error; 315 int error;
324 316
325 error = iscsi_conn_bind(cls_session, cls_conn, is_leading); 317 error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
@@ -328,12 +320,14 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
328 320
329 /* the transport ep handle comes from user space so it must be 321 /* the transport ep handle comes from user space so it must be
330 * verified against the global ib connections list */ 322 * verified against the global ib connections list */
331 ib_conn = iscsi_iser_ib_conn_lookup(transport_eph); 323 ep = iscsi_lookup_endpoint(transport_eph);
332 if (!ib_conn) { 324 if (!ep) {
333 iser_err("can't bind eph %llx\n", 325 iser_err("can't bind eph %llx\n",
334 (unsigned long long)transport_eph); 326 (unsigned long long)transport_eph);
335 return -EINVAL; 327 return -EINVAL;
336 } 328 }
329 ib_conn = ep->dd_data;
330
337 /* binds the iSER connection retrieved from the previously 331 /* binds the iSER connection retrieved from the previously
338 * connected ep_handle to the iSCSI layer connection. exchanges 332 * connected ep_handle to the iSCSI layer connection. exchanges
339 * connection pointers */ 333 * connection pointers */
@@ -341,10 +335,30 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
341 iser_conn = conn->dd_data; 335 iser_conn = conn->dd_data;
342 ib_conn->iser_conn = iser_conn; 336 ib_conn->iser_conn = iser_conn;
343 iser_conn->ib_conn = ib_conn; 337 iser_conn->ib_conn = ib_conn;
338 iser_conn_get(ib_conn);
339 return 0;
340}
344 341
345 conn->recv_lock = &iser_conn->lock; 342static void
343iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
344{
345 struct iscsi_conn *conn = cls_conn->dd_data;
346 struct iscsi_iser_conn *iser_conn = conn->dd_data;
347 struct iser_conn *ib_conn = iser_conn->ib_conn;
346 348
347 return 0; 349 /*
350 * Userspace may have goofed up and not bound the connection or
351 * might have only partially setup the connection.
352 */
353 if (ib_conn) {
354 iscsi_conn_stop(cls_conn, flag);
355 /*
356 * There is no unbind event so the stop callback
357 * must release the ref from the bind.
358 */
359 iser_conn_put(ib_conn);
360 }
361 iser_conn->ib_conn = NULL;
348} 362}
349 363
350static int 364static int
@@ -360,55 +374,75 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
360 return iscsi_conn_start(cls_conn); 374 return iscsi_conn_start(cls_conn);
361} 375}
362 376
363static struct iscsi_transport iscsi_iser_transport; 377static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
378{
379 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
380
381 iscsi_host_remove(shost);
382 iscsi_host_free(shost);
383}
364 384
365static struct iscsi_cls_session * 385static struct iscsi_cls_session *
366iscsi_iser_session_create(struct iscsi_transport *iscsit, 386iscsi_iser_session_create(struct iscsi_endpoint *ep,
367 struct scsi_transport_template *scsit, 387 uint16_t cmds_max, uint16_t qdepth,
368 uint16_t cmds_max, uint16_t qdepth, 388 uint32_t initial_cmdsn, uint32_t *hostno)
369 uint32_t initial_cmdsn, uint32_t *hostno)
370{ 389{
371 struct iscsi_cls_session *cls_session; 390 struct iscsi_cls_session *cls_session;
372 struct iscsi_session *session; 391 struct iscsi_session *session;
392 struct Scsi_Host *shost;
373 int i; 393 int i;
374 uint32_t hn; 394 struct iscsi_task *task;
375 struct iscsi_cmd_task *ctask; 395 struct iscsi_iser_task *iser_task;
376 struct iscsi_mgmt_task *mtask; 396 struct iser_conn *ib_conn;
377 struct iscsi_iser_cmd_task *iser_ctask; 397
378 struct iser_desc *desc; 398 shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
399 if (!shost)
400 return NULL;
401 shost->transportt = iscsi_iser_scsi_transport;
402 shost->max_lun = iscsi_max_lun;
403 shost->max_id = 0;
404 shost->max_channel = 0;
405 shost->max_cmd_len = 16;
406
407 /*
408 * older userspace tools (before 2.0-870) did not pass us
409 * the leading conn's ep so this will be NULL;
410 */
411 if (ep)
412 ib_conn = ep->dd_data;
413
414 if (iscsi_host_add(shost,
415 ep ? ib_conn->device->ib_device->dma_device : NULL))
416 goto free_host;
417 *hostno = shost->host_no;
379 418
380 /* 419 /*
381 * we do not support setting can_queue cmd_per_lun from userspace yet 420 * we do not support setting can_queue cmd_per_lun from userspace yet
382 * because we preallocate so many resources 421 * because we preallocate so many resources
383 */ 422 */
384 cls_session = iscsi_session_setup(iscsit, scsit, 423 cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
385 ISCSI_DEF_XMIT_CMDS_MAX, 424 ISCSI_DEF_XMIT_CMDS_MAX,
386 ISCSI_MAX_CMD_PER_LUN, 425 sizeof(struct iscsi_iser_task),
387 sizeof(struct iscsi_iser_cmd_task), 426 initial_cmdsn, 0);
388 sizeof(struct iser_desc),
389 initial_cmdsn, &hn);
390 if (!cls_session) 427 if (!cls_session)
391 return NULL; 428 goto remove_host;
392 429 session = cls_session->dd_data;
393 *hostno = hn;
394 session = class_to_transport_session(cls_session);
395 430
431 shost->can_queue = session->scsi_cmds_max;
396 /* libiscsi setup itts, data and pool so just set desc fields */ 432 /* libiscsi setup itts, data and pool so just set desc fields */
397 for (i = 0; i < session->cmds_max; i++) { 433 for (i = 0; i < session->cmds_max; i++) {
398 ctask = session->cmds[i]; 434 task = session->cmds[i];
399 iser_ctask = ctask->dd_data; 435 iser_task = task->dd_data;
400 ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header; 436 task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
401 ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header); 437 task->hdr_max = sizeof(iser_task->desc.iscsi_header);
402 }
403
404 for (i = 0; i < session->mgmtpool_max; i++) {
405 mtask = session->mgmt_cmds[i];
406 desc = mtask->dd_data;
407 mtask->hdr = &desc->iscsi_header;
408 desc->data = mtask->data;
409 } 438 }
410
411 return cls_session; 439 return cls_session;
440
441remove_host:
442 iscsi_host_remove(shost);
443free_host:
444 iscsi_host_free(shost);
445 return NULL;
412} 446}
413 447
414static int 448static int
@@ -481,34 +515,37 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
481 stats->custom[3].value = conn->fmr_unalign_cnt; 515 stats->custom[3].value = conn->fmr_unalign_cnt;
482} 516}
483 517
484static int 518static struct iscsi_endpoint *
485iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking, 519iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
486 __u64 *ep_handle)
487{ 520{
488 int err; 521 int err;
489 struct iser_conn *ib_conn; 522 struct iser_conn *ib_conn;
523 struct iscsi_endpoint *ep;
490 524
491 err = iser_conn_init(&ib_conn); 525 ep = iscsi_create_endpoint(sizeof(*ib_conn));
492 if (err) 526 if (!ep)
493 goto out; 527 return ERR_PTR(-ENOMEM);
494 528
495 err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking); 529 ib_conn = ep->dd_data;
496 if (!err) 530 ib_conn->ep = ep;
497 *ep_handle = (__u64)(unsigned long)ib_conn; 531 iser_conn_init(ib_conn);
498 532
499out: 533 err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
500 return err; 534 non_blocking);
535 if (err) {
536 iscsi_destroy_endpoint(ep);
537 return ERR_PTR(err);
538 }
539 return ep;
501} 540}
502 541
503static int 542static int
504iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms) 543iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
505{ 544{
506 struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); 545 struct iser_conn *ib_conn;
507 int rc; 546 int rc;
508 547
509 if (!ib_conn) 548 ib_conn = ep->dd_data;
510 return -EINVAL;
511
512 rc = wait_event_interruptible_timeout(ib_conn->wait, 549 rc = wait_event_interruptible_timeout(ib_conn->wait,
513 ib_conn->state == ISER_CONN_UP, 550 ib_conn->state == ISER_CONN_UP,
514 msecs_to_jiffies(timeout_ms)); 551 msecs_to_jiffies(timeout_ms));
@@ -530,13 +567,21 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
530} 567}
531 568
532static void 569static void
533iscsi_iser_ep_disconnect(__u64 ep_handle) 570iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
534{ 571{
535 struct iser_conn *ib_conn; 572 struct iser_conn *ib_conn;
536 573
537 ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); 574 ib_conn = ep->dd_data;
538 if (!ib_conn) 575 if (ib_conn->iser_conn)
539 return; 576 /*
577 * Must suspend xmit path if the ep is bound to the
578 * iscsi_conn, so we know we are not accessing the ib_conn
579 * when we free it.
580 *
581 * This may not be bound if the ep poll failed.
582 */
583 iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
584
540 585
541 iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state); 586 iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
542 iser_conn_terminate(ib_conn); 587 iser_conn_terminate(ib_conn);
@@ -547,7 +592,6 @@ static struct scsi_host_template iscsi_iser_sht = {
547 .name = "iSCSI Initiator over iSER, v." DRV_VER, 592 .name = "iSCSI Initiator over iSER, v." DRV_VER,
548 .queuecommand = iscsi_queuecommand, 593 .queuecommand = iscsi_queuecommand,
549 .change_queue_depth = iscsi_change_queue_depth, 594 .change_queue_depth = iscsi_change_queue_depth,
550 .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
551 .sg_tablesize = ISCSI_ISER_SG_TABLESIZE, 595 .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
552 .max_sectors = 1024, 596 .max_sectors = 1024,
553 .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN, 597 .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
@@ -581,17 +625,14 @@ static struct iscsi_transport iscsi_iser_transport = {
581 ISCSI_USERNAME | ISCSI_PASSWORD | 625 ISCSI_USERNAME | ISCSI_PASSWORD |
582 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | 626 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
583 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | 627 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
584 ISCSI_PING_TMO | ISCSI_RECV_TMO, 628 ISCSI_PING_TMO | ISCSI_RECV_TMO |
629 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
585 .host_param_mask = ISCSI_HOST_HWADDRESS | 630 .host_param_mask = ISCSI_HOST_HWADDRESS |
586 ISCSI_HOST_NETDEV_NAME | 631 ISCSI_HOST_NETDEV_NAME |
587 ISCSI_HOST_INITIATOR_NAME, 632 ISCSI_HOST_INITIATOR_NAME,
588 .host_template = &iscsi_iser_sht,
589 .conndata_size = sizeof(struct iscsi_conn),
590 .max_lun = ISCSI_ISER_MAX_LUN,
591 .max_cmd_len = ISCSI_ISER_MAX_CMD_LEN,
592 /* session management */ 633 /* session management */
593 .create_session = iscsi_iser_session_create, 634 .create_session = iscsi_iser_session_create,
594 .destroy_session = iscsi_session_teardown, 635 .destroy_session = iscsi_iser_session_destroy,
595 /* connection management */ 636 /* connection management */
596 .create_conn = iscsi_iser_conn_create, 637 .create_conn = iscsi_iser_conn_create,
597 .bind_conn = iscsi_iser_conn_bind, 638 .bind_conn = iscsi_iser_conn_bind,
@@ -600,17 +641,16 @@ static struct iscsi_transport iscsi_iser_transport = {
600 .get_conn_param = iscsi_conn_get_param, 641 .get_conn_param = iscsi_conn_get_param,
601 .get_session_param = iscsi_session_get_param, 642 .get_session_param = iscsi_session_get_param,
602 .start_conn = iscsi_iser_conn_start, 643 .start_conn = iscsi_iser_conn_start,
603 .stop_conn = iscsi_conn_stop, 644 .stop_conn = iscsi_iser_conn_stop,
604 /* iscsi host params */ 645 /* iscsi host params */
605 .get_host_param = iscsi_host_get_param, 646 .get_host_param = iscsi_host_get_param,
606 .set_host_param = iscsi_host_set_param, 647 .set_host_param = iscsi_host_set_param,
607 /* IO */ 648 /* IO */
608 .send_pdu = iscsi_conn_send_pdu, 649 .send_pdu = iscsi_conn_send_pdu,
609 .get_stats = iscsi_iser_conn_get_stats, 650 .get_stats = iscsi_iser_conn_get_stats,
610 .init_cmd_task = iscsi_iser_cmd_init, 651 .init_task = iscsi_iser_task_init,
611 .xmit_cmd_task = iscsi_iser_ctask_xmit, 652 .xmit_task = iscsi_iser_task_xmit,
612 .xmit_mgmt_task = iscsi_iser_mtask_xmit, 653 .cleanup_task = iscsi_iser_cleanup_task,
613 .cleanup_cmd_task = iscsi_iser_cleanup_ctask,
614 /* recovery */ 654 /* recovery */
615 .session_recovery_timedout = iscsi_session_recovery_timedout, 655 .session_recovery_timedout = iscsi_session_recovery_timedout,
616 656
@@ -630,8 +670,6 @@ static int __init iser_init(void)
630 return -EINVAL; 670 return -EINVAL;
631 } 671 }
632 672
633 iscsi_iser_transport.max_lun = iscsi_max_lun;
634
635 memset(&ig, 0, sizeof(struct iser_global)); 673 memset(&ig, 0, sizeof(struct iser_global));
636 674
637 ig.desc_cache = kmem_cache_create("iser_descriptors", 675 ig.desc_cache = kmem_cache_create("iser_descriptors",
@@ -647,7 +685,9 @@ static int __init iser_init(void)
647 mutex_init(&ig.connlist_mutex); 685 mutex_init(&ig.connlist_mutex);
648 INIT_LIST_HEAD(&ig.connlist); 686 INIT_LIST_HEAD(&ig.connlist);
649 687
650 if (!iscsi_register_transport(&iscsi_iser_transport)) { 688 iscsi_iser_scsi_transport = iscsi_register_transport(
689 &iscsi_iser_transport);
690 if (!iscsi_iser_scsi_transport) {
651 iser_err("iscsi_register_transport failed\n"); 691 iser_err("iscsi_register_transport failed\n");
652 err = -EINVAL; 692 err = -EINVAL;
653 goto register_transport_failure; 693 goto register_transport_failure;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 0e10703cf59e..81a82628a5f1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -94,7 +94,6 @@
94 /* support upto 512KB in one RDMA */ 94 /* support upto 512KB in one RDMA */
95#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K) 95#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
96#define ISCSI_ISER_MAX_LUN 256 96#define ISCSI_ISER_MAX_LUN 256
97#define ISCSI_ISER_MAX_CMD_LEN 16
98 97
99/* QP settings */ 98/* QP settings */
100/* Maximal bounds on received asynchronous PDUs */ 99/* Maximal bounds on received asynchronous PDUs */
@@ -172,7 +171,8 @@ struct iser_data_buf {
172/* fwd declarations */ 171/* fwd declarations */
173struct iser_device; 172struct iser_device;
174struct iscsi_iser_conn; 173struct iscsi_iser_conn;
175struct iscsi_iser_cmd_task; 174struct iscsi_iser_task;
175struct iscsi_endpoint;
176 176
177struct iser_mem_reg { 177struct iser_mem_reg {
178 u32 lkey; 178 u32 lkey;
@@ -196,7 +196,7 @@ struct iser_regd_buf {
196#define MAX_REGD_BUF_VECTOR_LEN 2 196#define MAX_REGD_BUF_VECTOR_LEN 2
197 197
198struct iser_dto { 198struct iser_dto {
199 struct iscsi_iser_cmd_task *ctask; 199 struct iscsi_iser_task *task;
200 struct iser_conn *ib_conn; 200 struct iser_conn *ib_conn;
201 int notify_enable; 201 int notify_enable;
202 202
@@ -240,7 +240,9 @@ struct iser_device {
240 240
241struct iser_conn { 241struct iser_conn {
242 struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */ 242 struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
243 struct iscsi_endpoint *ep;
243 enum iser_ib_conn_state state; /* rdma connection state */ 244 enum iser_ib_conn_state state; /* rdma connection state */
245 atomic_t refcount;
244 spinlock_t lock; /* used for state changes */ 246 spinlock_t lock; /* used for state changes */
245 struct iser_device *device; /* device context */ 247 struct iser_device *device; /* device context */
246 struct rdma_cm_id *cma_id; /* CMA ID */ 248 struct rdma_cm_id *cma_id; /* CMA ID */
@@ -259,11 +261,9 @@ struct iser_conn {
259struct iscsi_iser_conn { 261struct iscsi_iser_conn {
260 struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */ 262 struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
261 struct iser_conn *ib_conn; /* iSER IB conn */ 263 struct iser_conn *ib_conn; /* iSER IB conn */
262
263 rwlock_t lock;
264}; 264};
265 265
266struct iscsi_iser_cmd_task { 266struct iscsi_iser_task {
267 struct iser_desc desc; 267 struct iser_desc desc;
268 struct iscsi_iser_conn *iser_conn; 268 struct iscsi_iser_conn *iser_conn;
269 enum iser_task_status status; 269 enum iser_task_status status;
@@ -296,22 +296,26 @@ extern int iser_debug_level;
296/* allocate connection resources needed for rdma functionality */ 296/* allocate connection resources needed for rdma functionality */
297int iser_conn_set_full_featured_mode(struct iscsi_conn *conn); 297int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
298 298
299int iser_send_control(struct iscsi_conn *conn, 299int iser_send_control(struct iscsi_conn *conn,
300 struct iscsi_mgmt_task *mtask); 300 struct iscsi_task *task);
301 301
302int iser_send_command(struct iscsi_conn *conn, 302int iser_send_command(struct iscsi_conn *conn,
303 struct iscsi_cmd_task *ctask); 303 struct iscsi_task *task);
304 304
305int iser_send_data_out(struct iscsi_conn *conn, 305int iser_send_data_out(struct iscsi_conn *conn,
306 struct iscsi_cmd_task *ctask, 306 struct iscsi_task *task,
307 struct iscsi_data *hdr); 307 struct iscsi_data *hdr);
308 308
309void iscsi_iser_recv(struct iscsi_conn *conn, 309void iscsi_iser_recv(struct iscsi_conn *conn,
310 struct iscsi_hdr *hdr, 310 struct iscsi_hdr *hdr,
311 char *rx_data, 311 char *rx_data,
312 int rx_data_len); 312 int rx_data_len);
313 313
314int iser_conn_init(struct iser_conn **ib_conn); 314void iser_conn_init(struct iser_conn *ib_conn);
315
316void iser_conn_get(struct iser_conn *ib_conn);
317
318void iser_conn_put(struct iser_conn *ib_conn);
315 319
316void iser_conn_terminate(struct iser_conn *ib_conn); 320void iser_conn_terminate(struct iser_conn *ib_conn);
317 321
@@ -320,9 +324,9 @@ void iser_rcv_completion(struct iser_desc *desc,
320 324
321void iser_snd_completion(struct iser_desc *desc); 325void iser_snd_completion(struct iser_desc *desc);
322 326
323void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask); 327void iser_task_rdma_init(struct iscsi_iser_task *task);
324 328
325void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask); 329void iser_task_rdma_finalize(struct iscsi_iser_task *task);
326 330
327void iser_dto_buffs_release(struct iser_dto *dto); 331void iser_dto_buffs_release(struct iser_dto *dto);
328 332
@@ -332,10 +336,10 @@ void iser_reg_single(struct iser_device *device,
332 struct iser_regd_buf *regd_buf, 336 struct iser_regd_buf *regd_buf,
333 enum dma_data_direction direction); 337 enum dma_data_direction direction);
334 338
335void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask, 339void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
336 enum iser_data_dir cmd_dir); 340 enum iser_data_dir cmd_dir);
337 341
338int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask, 342int iser_reg_rdma_mem(struct iscsi_iser_task *task,
339 enum iser_data_dir cmd_dir); 343 enum iser_data_dir cmd_dir);
340 344
341int iser_connect(struct iser_conn *ib_conn, 345int iser_connect(struct iser_conn *ib_conn,
@@ -355,10 +359,10 @@ int iser_post_send(struct iser_desc *tx_desc);
355int iser_conn_state_comp(struct iser_conn *ib_conn, 359int iser_conn_state_comp(struct iser_conn *ib_conn,
356 enum iser_ib_conn_state comp); 360 enum iser_ib_conn_state comp);
357 361
358int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask, 362int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
359 struct iser_data_buf *data, 363 struct iser_data_buf *data,
360 enum iser_data_dir iser_dir, 364 enum iser_data_dir iser_dir,
361 enum dma_data_direction dma_dir); 365 enum dma_data_direction dma_dir);
362 366
363void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask); 367void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
364#endif 368#endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 31ad498bdc51..cdd283189047 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -64,46 +64,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
64 64
65/* Register user buffer memory and initialize passive rdma 65/* Register user buffer memory and initialize passive rdma
66 * dto descriptor. Total data size is stored in 66 * dto descriptor. Total data size is stored in
67 * iser_ctask->data[ISER_DIR_IN].data_len 67 * iser_task->data[ISER_DIR_IN].data_len
68 */ 68 */
69static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask, 69static int iser_prepare_read_cmd(struct iscsi_task *task,
70 unsigned int edtl) 70 unsigned int edtl)
71 71
72{ 72{
73 struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; 73 struct iscsi_iser_task *iser_task = task->dd_data;
74 struct iser_regd_buf *regd_buf; 74 struct iser_regd_buf *regd_buf;
75 int err; 75 int err;
76 struct iser_hdr *hdr = &iser_ctask->desc.iser_header; 76 struct iser_hdr *hdr = &iser_task->desc.iser_header;
77 struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN]; 77 struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
78 78
79 err = iser_dma_map_task_data(iser_ctask, 79 err = iser_dma_map_task_data(iser_task,
80 buf_in, 80 buf_in,
81 ISER_DIR_IN, 81 ISER_DIR_IN,
82 DMA_FROM_DEVICE); 82 DMA_FROM_DEVICE);
83 if (err) 83 if (err)
84 return err; 84 return err;
85 85
86 if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) { 86 if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
87 iser_err("Total data length: %ld, less than EDTL: " 87 iser_err("Total data length: %ld, less than EDTL: "
88 "%d, in READ cmd BHS itt: %d, conn: 0x%p\n", 88 "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
89 iser_ctask->data[ISER_DIR_IN].data_len, edtl, 89 iser_task->data[ISER_DIR_IN].data_len, edtl,
90 ctask->itt, iser_ctask->iser_conn); 90 task->itt, iser_task->iser_conn);
91 return -EINVAL; 91 return -EINVAL;
92 } 92 }
93 93
94 err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN); 94 err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
95 if (err) { 95 if (err) {
96 iser_err("Failed to set up Data-IN RDMA\n"); 96 iser_err("Failed to set up Data-IN RDMA\n");
97 return err; 97 return err;
98 } 98 }
99 regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN]; 99 regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
100 100
101 hdr->flags |= ISER_RSV; 101 hdr->flags |= ISER_RSV;
102 hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey); 102 hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
103 hdr->read_va = cpu_to_be64(regd_buf->reg.va); 103 hdr->read_va = cpu_to_be64(regd_buf->reg.va);
104 104
105 iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n", 105 iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
106 ctask->itt, regd_buf->reg.rkey, 106 task->itt, regd_buf->reg.rkey,
107 (unsigned long long)regd_buf->reg.va); 107 (unsigned long long)regd_buf->reg.va);
108 108
109 return 0; 109 return 0;
@@ -111,43 +111,43 @@ static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
111 111
112/* Register user buffer memory and initialize passive rdma 112/* Register user buffer memory and initialize passive rdma
113 * dto descriptor. Total data size is stored in 113 * dto descriptor. Total data size is stored in
114 * ctask->data[ISER_DIR_OUT].data_len 114 * task->data[ISER_DIR_OUT].data_len
115 */ 115 */
116static int 116static int
117iser_prepare_write_cmd(struct iscsi_cmd_task *ctask, 117iser_prepare_write_cmd(struct iscsi_task *task,
118 unsigned int imm_sz, 118 unsigned int imm_sz,
119 unsigned int unsol_sz, 119 unsigned int unsol_sz,
120 unsigned int edtl) 120 unsigned int edtl)
121{ 121{
122 struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; 122 struct iscsi_iser_task *iser_task = task->dd_data;
123 struct iser_regd_buf *regd_buf; 123 struct iser_regd_buf *regd_buf;
124 int err; 124 int err;
125 struct iser_dto *send_dto = &iser_ctask->desc.dto; 125 struct iser_dto *send_dto = &iser_task->desc.dto;
126 struct iser_hdr *hdr = &iser_ctask->desc.iser_header; 126 struct iser_hdr *hdr = &iser_task->desc.iser_header;
127 struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT]; 127 struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
128 128
129 err = iser_dma_map_task_data(iser_ctask, 129 err = iser_dma_map_task_data(iser_task,
130 buf_out, 130 buf_out,
131 ISER_DIR_OUT, 131 ISER_DIR_OUT,
132 DMA_TO_DEVICE); 132 DMA_TO_DEVICE);
133 if (err) 133 if (err)
134 return err; 134 return err;
135 135
136 if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) { 136 if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
137 iser_err("Total data length: %ld, less than EDTL: %d, " 137 iser_err("Total data length: %ld, less than EDTL: %d, "
138 "in WRITE cmd BHS itt: %d, conn: 0x%p\n", 138 "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
139 iser_ctask->data[ISER_DIR_OUT].data_len, 139 iser_task->data[ISER_DIR_OUT].data_len,
140 edtl, ctask->itt, ctask->conn); 140 edtl, task->itt, task->conn);
141 return -EINVAL; 141 return -EINVAL;
142 } 142 }
143 143
144 err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT); 144 err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
145 if (err != 0) { 145 if (err != 0) {
146 iser_err("Failed to register write cmd RDMA mem\n"); 146 iser_err("Failed to register write cmd RDMA mem\n");
147 return err; 147 return err;
148 } 148 }
149 149
150 regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT]; 150 regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
151 151
152 if (unsol_sz < edtl) { 152 if (unsol_sz < edtl) {
153 hdr->flags |= ISER_WSV; 153 hdr->flags |= ISER_WSV;
@@ -156,13 +156,13 @@ iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
156 156
157 iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X " 157 iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
158 "VA:%#llX + unsol:%d\n", 158 "VA:%#llX + unsol:%d\n",
159 ctask->itt, regd_buf->reg.rkey, 159 task->itt, regd_buf->reg.rkey,
160 (unsigned long long)regd_buf->reg.va, unsol_sz); 160 (unsigned long long)regd_buf->reg.va, unsol_sz);
161 } 161 }
162 162
163 if (imm_sz > 0) { 163 if (imm_sz > 0) {
164 iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n", 164 iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
165 ctask->itt, imm_sz); 165 task->itt, imm_sz);
166 iser_dto_add_regd_buff(send_dto, 166 iser_dto_add_regd_buff(send_dto,
167 regd_buf, 167 regd_buf,
168 0, 168 0,
@@ -314,38 +314,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
314/** 314/**
315 * iser_send_command - send command PDU 315 * iser_send_command - send command PDU
316 */ 316 */
317int iser_send_command(struct iscsi_conn *conn, 317int iser_send_command(struct iscsi_conn *conn,
318 struct iscsi_cmd_task *ctask) 318 struct iscsi_task *task)
319{ 319{
320 struct iscsi_iser_conn *iser_conn = conn->dd_data; 320 struct iscsi_iser_conn *iser_conn = conn->dd_data;
321 struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; 321 struct iscsi_iser_task *iser_task = task->dd_data;
322 struct iser_dto *send_dto = NULL; 322 struct iser_dto *send_dto = NULL;
323 unsigned long edtl; 323 unsigned long edtl;
324 int err = 0; 324 int err = 0;
325 struct iser_data_buf *data_buf; 325 struct iser_data_buf *data_buf;
326 326
327 struct iscsi_cmd *hdr = ctask->hdr; 327 struct iscsi_cmd *hdr = task->hdr;
328 struct scsi_cmnd *sc = ctask->sc; 328 struct scsi_cmnd *sc = task->sc;
329 329
330 if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) { 330 if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
331 iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn); 331 iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
332 return -EPERM; 332 return -EPERM;
333 } 333 }
334 if (iser_check_xmit(conn, ctask)) 334 if (iser_check_xmit(conn, task))
335 return -ENOBUFS; 335 return -ENOBUFS;
336 336
337 edtl = ntohl(hdr->data_length); 337 edtl = ntohl(hdr->data_length);
338 338
339 /* build the tx desc regd header and add it to the tx desc dto */ 339 /* build the tx desc regd header and add it to the tx desc dto */
340 iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND; 340 iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
341 send_dto = &iser_ctask->desc.dto; 341 send_dto = &iser_task->desc.dto;
342 send_dto->ctask = iser_ctask; 342 send_dto->task = iser_task;
343 iser_create_send_desc(iser_conn, &iser_ctask->desc); 343 iser_create_send_desc(iser_conn, &iser_task->desc);
344 344
345 if (hdr->flags & ISCSI_FLAG_CMD_READ) 345 if (hdr->flags & ISCSI_FLAG_CMD_READ)
346 data_buf = &iser_ctask->data[ISER_DIR_IN]; 346 data_buf = &iser_task->data[ISER_DIR_IN];
347 else 347 else
348 data_buf = &iser_ctask->data[ISER_DIR_OUT]; 348 data_buf = &iser_task->data[ISER_DIR_OUT];
349 349
350 if (scsi_sg_count(sc)) { /* using a scatter list */ 350 if (scsi_sg_count(sc)) { /* using a scatter list */
351 data_buf->buf = scsi_sglist(sc); 351 data_buf->buf = scsi_sglist(sc);
@@ -355,15 +355,15 @@ int iser_send_command(struct iscsi_conn *conn,
355 data_buf->data_len = scsi_bufflen(sc); 355 data_buf->data_len = scsi_bufflen(sc);
356 356
357 if (hdr->flags & ISCSI_FLAG_CMD_READ) { 357 if (hdr->flags & ISCSI_FLAG_CMD_READ) {
358 err = iser_prepare_read_cmd(ctask, edtl); 358 err = iser_prepare_read_cmd(task, edtl);
359 if (err) 359 if (err)
360 goto send_command_error; 360 goto send_command_error;
361 } 361 }
362 if (hdr->flags & ISCSI_FLAG_CMD_WRITE) { 362 if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
363 err = iser_prepare_write_cmd(ctask, 363 err = iser_prepare_write_cmd(task,
364 ctask->imm_count, 364 task->imm_count,
365 ctask->imm_count + 365 task->imm_count +
366 ctask->unsol_count, 366 task->unsol_count,
367 edtl); 367 edtl);
368 if (err) 368 if (err)
369 goto send_command_error; 369 goto send_command_error;
@@ -378,27 +378,27 @@ int iser_send_command(struct iscsi_conn *conn,
378 goto send_command_error; 378 goto send_command_error;
379 } 379 }
380 380
381 iser_ctask->status = ISER_TASK_STATUS_STARTED; 381 iser_task->status = ISER_TASK_STATUS_STARTED;
382 382
383 err = iser_post_send(&iser_ctask->desc); 383 err = iser_post_send(&iser_task->desc);
384 if (!err) 384 if (!err)
385 return 0; 385 return 0;
386 386
387send_command_error: 387send_command_error:
388 iser_dto_buffs_release(send_dto); 388 iser_dto_buffs_release(send_dto);
389 iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err); 389 iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
390 return err; 390 return err;
391} 391}
392 392
393/** 393/**
394 * iser_send_data_out - send data out PDU 394 * iser_send_data_out - send data out PDU
395 */ 395 */
396int iser_send_data_out(struct iscsi_conn *conn, 396int iser_send_data_out(struct iscsi_conn *conn,
397 struct iscsi_cmd_task *ctask, 397 struct iscsi_task *task,
398 struct iscsi_data *hdr) 398 struct iscsi_data *hdr)
399{ 399{
400 struct iscsi_iser_conn *iser_conn = conn->dd_data; 400 struct iscsi_iser_conn *iser_conn = conn->dd_data;
401 struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; 401 struct iscsi_iser_task *iser_task = task->dd_data;
402 struct iser_desc *tx_desc = NULL; 402 struct iser_desc *tx_desc = NULL;
403 struct iser_dto *send_dto = NULL; 403 struct iser_dto *send_dto = NULL;
404 unsigned long buf_offset; 404 unsigned long buf_offset;
@@ -411,7 +411,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
411 return -EPERM; 411 return -EPERM;
412 } 412 }
413 413
414 if (iser_check_xmit(conn, ctask)) 414 if (iser_check_xmit(conn, task))
415 return -ENOBUFS; 415 return -ENOBUFS;
416 416
417 itt = (__force uint32_t)hdr->itt; 417 itt = (__force uint32_t)hdr->itt;
@@ -432,7 +432,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
432 432
433 /* build the tx desc regd header and add it to the tx desc dto */ 433 /* build the tx desc regd header and add it to the tx desc dto */
434 send_dto = &tx_desc->dto; 434 send_dto = &tx_desc->dto;
435 send_dto->ctask = iser_ctask; 435 send_dto->task = iser_task;
436 iser_create_send_desc(iser_conn, tx_desc); 436 iser_create_send_desc(iser_conn, tx_desc);
437 437
438 iser_reg_single(iser_conn->ib_conn->device, 438 iser_reg_single(iser_conn->ib_conn->device,
@@ -440,15 +440,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
440 440
441 /* all data was registered for RDMA, we can use the lkey */ 441 /* all data was registered for RDMA, we can use the lkey */
442 iser_dto_add_regd_buff(send_dto, 442 iser_dto_add_regd_buff(send_dto,
443 &iser_ctask->rdma_regd[ISER_DIR_OUT], 443 &iser_task->rdma_regd[ISER_DIR_OUT],
444 buf_offset, 444 buf_offset,
445 data_seg_len); 445 data_seg_len);
446 446
447 if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) { 447 if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
448 iser_err("Offset:%ld & DSL:%ld in Data-Out " 448 iser_err("Offset:%ld & DSL:%ld in Data-Out "
449 "inconsistent with total len:%ld, itt:%d\n", 449 "inconsistent with total len:%ld, itt:%d\n",
450 buf_offset, data_seg_len, 450 buf_offset, data_seg_len,
451 iser_ctask->data[ISER_DIR_OUT].data_len, itt); 451 iser_task->data[ISER_DIR_OUT].data_len, itt);
452 err = -EINVAL; 452 err = -EINVAL;
453 goto send_data_out_error; 453 goto send_data_out_error;
454 } 454 }
@@ -468,10 +468,11 @@ send_data_out_error:
468} 468}
469 469
470int iser_send_control(struct iscsi_conn *conn, 470int iser_send_control(struct iscsi_conn *conn,
471 struct iscsi_mgmt_task *mtask) 471 struct iscsi_task *task)
472{ 472{
473 struct iscsi_iser_conn *iser_conn = conn->dd_data; 473 struct iscsi_iser_conn *iser_conn = conn->dd_data;
474 struct iser_desc *mdesc = mtask->dd_data; 474 struct iscsi_iser_task *iser_task = task->dd_data;
475 struct iser_desc *mdesc = &iser_task->desc;
475 struct iser_dto *send_dto = NULL; 476 struct iser_dto *send_dto = NULL;
476 unsigned long data_seg_len; 477 unsigned long data_seg_len;
477 int err = 0; 478 int err = 0;
@@ -483,27 +484,27 @@ int iser_send_control(struct iscsi_conn *conn,
483 return -EPERM; 484 return -EPERM;
484 } 485 }
485 486
486 if (iser_check_xmit(conn,mtask)) 487 if (iser_check_xmit(conn, task))
487 return -ENOBUFS; 488 return -ENOBUFS;
488 489
489 /* build the tx desc regd header and add it to the tx desc dto */ 490 /* build the tx desc regd header and add it to the tx desc dto */
490 mdesc->type = ISCSI_TX_CONTROL; 491 mdesc->type = ISCSI_TX_CONTROL;
491 send_dto = &mdesc->dto; 492 send_dto = &mdesc->dto;
492 send_dto->ctask = NULL; 493 send_dto->task = NULL;
493 iser_create_send_desc(iser_conn, mdesc); 494 iser_create_send_desc(iser_conn, mdesc);
494 495
495 device = iser_conn->ib_conn->device; 496 device = iser_conn->ib_conn->device;
496 497
497 iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE); 498 iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
498 499
499 data_seg_len = ntoh24(mtask->hdr->dlength); 500 data_seg_len = ntoh24(task->hdr->dlength);
500 501
501 if (data_seg_len > 0) { 502 if (data_seg_len > 0) {
502 regd_buf = &mdesc->data_regd_buf; 503 regd_buf = &mdesc->data_regd_buf;
503 memset(regd_buf, 0, sizeof(struct iser_regd_buf)); 504 memset(regd_buf, 0, sizeof(struct iser_regd_buf));
504 regd_buf->device = device; 505 regd_buf->device = device;
505 regd_buf->virt_addr = mtask->data; 506 regd_buf->virt_addr = task->data;
506 regd_buf->data_size = mtask->data_count; 507 regd_buf->data_size = task->data_count;
507 iser_reg_single(device, regd_buf, 508 iser_reg_single(device, regd_buf,
508 DMA_TO_DEVICE); 509 DMA_TO_DEVICE);
509 iser_dto_add_regd_buff(send_dto, regd_buf, 510 iser_dto_add_regd_buff(send_dto, regd_buf,
@@ -533,15 +534,13 @@ send_control_error:
533void iser_rcv_completion(struct iser_desc *rx_desc, 534void iser_rcv_completion(struct iser_desc *rx_desc,
534 unsigned long dto_xfer_len) 535 unsigned long dto_xfer_len)
535{ 536{
536 struct iser_dto *dto = &rx_desc->dto; 537 struct iser_dto *dto = &rx_desc->dto;
537 struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn; 538 struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
538 struct iscsi_session *session = conn->iscsi_conn->session; 539 struct iscsi_task *task;
539 struct iscsi_cmd_task *ctask; 540 struct iscsi_iser_task *iser_task;
540 struct iscsi_iser_cmd_task *iser_ctask;
541 struct iscsi_hdr *hdr; 541 struct iscsi_hdr *hdr;
542 char *rx_data = NULL; 542 char *rx_data = NULL;
543 int rx_data_len = 0; 543 int rx_data_len = 0;
544 unsigned int itt;
545 unsigned char opcode; 544 unsigned char opcode;
546 545
547 hdr = &rx_desc->iscsi_header; 546 hdr = &rx_desc->iscsi_header;
@@ -557,19 +556,24 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
557 opcode = hdr->opcode & ISCSI_OPCODE_MASK; 556 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
558 557
559 if (opcode == ISCSI_OP_SCSI_CMD_RSP) { 558 if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
560 itt = get_itt(hdr->itt); /* mask out cid and age bits */ 559 spin_lock(&conn->iscsi_conn->session->lock);
561 if (!(itt < session->cmds_max)) 560 task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
561 if (task)
562 __iscsi_get_task(task);
563 spin_unlock(&conn->iscsi_conn->session->lock);
564
565 if (!task)
562 iser_err("itt can't be matched to task!!! " 566 iser_err("itt can't be matched to task!!! "
563 "conn %p opcode %d cmds_max %d itt %d\n", 567 "conn %p opcode %d itt %d\n",
564 conn->iscsi_conn,opcode,session->cmds_max,itt); 568 conn->iscsi_conn, opcode, hdr->itt);
565 /* use the mapping given with the cmds array indexed by itt */ 569 else {
566 ctask = (struct iscsi_cmd_task *)session->cmds[itt]; 570 iser_task = task->dd_data;
567 iser_ctask = ctask->dd_data; 571 iser_dbg("itt %d task %p\n",hdr->itt, task);
568 iser_dbg("itt %d ctask %p\n",itt,ctask); 572 iser_task->status = ISER_TASK_STATUS_COMPLETED;
569 iser_ctask->status = ISER_TASK_STATUS_COMPLETED; 573 iser_task_rdma_finalize(iser_task);
570 iser_ctask_rdma_finalize(iser_ctask); 574 iscsi_put_task(task);
575 }
571 } 576 }
572
573 iser_dto_buffs_release(dto); 577 iser_dto_buffs_release(dto);
574 578
575 iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len); 579 iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
@@ -590,7 +594,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
590 struct iser_conn *ib_conn = dto->ib_conn; 594 struct iser_conn *ib_conn = dto->ib_conn;
591 struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn; 595 struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
592 struct iscsi_conn *conn = iser_conn->iscsi_conn; 596 struct iscsi_conn *conn = iser_conn->iscsi_conn;
593 struct iscsi_mgmt_task *mtask; 597 struct iscsi_task *task;
594 int resume_tx = 0; 598 int resume_tx = 0;
595 599
596 iser_dbg("Initiator, Data sent dto=0x%p\n", dto); 600 iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
@@ -613,36 +617,31 @@ void iser_snd_completion(struct iser_desc *tx_desc)
613 617
614 if (tx_desc->type == ISCSI_TX_CONTROL) { 618 if (tx_desc->type == ISCSI_TX_CONTROL) {
615 /* this arithmetic is legal by libiscsi dd_data allocation */ 619 /* this arithmetic is legal by libiscsi dd_data allocation */
616 mtask = (void *) ((long)(void *)tx_desc - 620 task = (void *) ((long)(void *)tx_desc -
617 sizeof(struct iscsi_mgmt_task)); 621 sizeof(struct iscsi_task));
618 if (mtask->hdr->itt == RESERVED_ITT) { 622 if (task->hdr->itt == RESERVED_ITT)
619 struct iscsi_session *session = conn->session; 623 iscsi_put_task(task);
620
621 spin_lock(&conn->session->lock);
622 iscsi_free_mgmt_task(conn, mtask);
623 spin_unlock(&session->lock);
624 }
625 } 624 }
626} 625}
627 626
628void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask) 627void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
629 628
630{ 629{
631 iser_ctask->status = ISER_TASK_STATUS_INIT; 630 iser_task->status = ISER_TASK_STATUS_INIT;
632 631
633 iser_ctask->dir[ISER_DIR_IN] = 0; 632 iser_task->dir[ISER_DIR_IN] = 0;
634 iser_ctask->dir[ISER_DIR_OUT] = 0; 633 iser_task->dir[ISER_DIR_OUT] = 0;
635 634
636 iser_ctask->data[ISER_DIR_IN].data_len = 0; 635 iser_task->data[ISER_DIR_IN].data_len = 0;
637 iser_ctask->data[ISER_DIR_OUT].data_len = 0; 636 iser_task->data[ISER_DIR_OUT].data_len = 0;
638 637
639 memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0, 638 memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
640 sizeof(struct iser_regd_buf)); 639 sizeof(struct iser_regd_buf));
641 memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0, 640 memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
642 sizeof(struct iser_regd_buf)); 641 sizeof(struct iser_regd_buf));
643} 642}
644 643
645void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask) 644void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
646{ 645{
647 int deferred; 646 int deferred;
648 int is_rdma_aligned = 1; 647 int is_rdma_aligned = 1;
@@ -651,17 +650,17 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
651 /* if we were reading, copy back to unaligned sglist, 650 /* if we were reading, copy back to unaligned sglist,
652 * anyway dma_unmap and free the copy 651 * anyway dma_unmap and free the copy
653 */ 652 */
654 if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) { 653 if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
655 is_rdma_aligned = 0; 654 is_rdma_aligned = 0;
656 iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN); 655 iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
657 } 656 }
658 if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) { 657 if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
659 is_rdma_aligned = 0; 658 is_rdma_aligned = 0;
660 iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT); 659 iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
661 } 660 }
662 661
663 if (iser_ctask->dir[ISER_DIR_IN]) { 662 if (iser_task->dir[ISER_DIR_IN]) {
664 regd = &iser_ctask->rdma_regd[ISER_DIR_IN]; 663 regd = &iser_task->rdma_regd[ISER_DIR_IN];
665 deferred = iser_regd_buff_release(regd); 664 deferred = iser_regd_buff_release(regd);
666 if (deferred) { 665 if (deferred) {
667 iser_err("%d references remain for BUF-IN rdma reg\n", 666 iser_err("%d references remain for BUF-IN rdma reg\n",
@@ -669,8 +668,8 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
669 } 668 }
670 } 669 }
671 670
672 if (iser_ctask->dir[ISER_DIR_OUT]) { 671 if (iser_task->dir[ISER_DIR_OUT]) {
673 regd = &iser_ctask->rdma_regd[ISER_DIR_OUT]; 672 regd = &iser_task->rdma_regd[ISER_DIR_OUT];
674 deferred = iser_regd_buff_release(regd); 673 deferred = iser_regd_buff_release(regd);
675 if (deferred) { 674 if (deferred) {
676 iser_err("%d references remain for BUF-OUT rdma reg\n", 675 iser_err("%d references remain for BUF-OUT rdma reg\n",
@@ -680,7 +679,7 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
680 679
681 /* if the data was unaligned, it was already unmapped and then copied */ 680 /* if the data was unaligned, it was already unmapped and then copied */
682 if (is_rdma_aligned) 681 if (is_rdma_aligned)
683 iser_dma_unmap_task_data(iser_ctask); 682 iser_dma_unmap_task_data(iser_task);
684} 683}
685 684
686void iser_dto_buffs_release(struct iser_dto *dto) 685void iser_dto_buffs_release(struct iser_dto *dto)
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 81e49cb10ed3..b9453d068e9d 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -99,13 +99,13 @@ void iser_reg_single(struct iser_device *device,
99/** 99/**
100 * iser_start_rdma_unaligned_sg 100 * iser_start_rdma_unaligned_sg
101 */ 101 */
102static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, 102static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
103 enum iser_data_dir cmd_dir) 103 enum iser_data_dir cmd_dir)
104{ 104{
105 int dma_nents; 105 int dma_nents;
106 struct ib_device *dev; 106 struct ib_device *dev;
107 char *mem = NULL; 107 char *mem = NULL;
108 struct iser_data_buf *data = &iser_ctask->data[cmd_dir]; 108 struct iser_data_buf *data = &iser_task->data[cmd_dir];
109 unsigned long cmd_data_len = data->data_len; 109 unsigned long cmd_data_len = data->data_len;
110 110
111 if (cmd_data_len > ISER_KMALLOC_THRESHOLD) 111 if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
@@ -138,37 +138,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
138 } 138 }
139 } 139 }
140 140
141 sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len); 141 sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
142 iser_ctask->data_copy[cmd_dir].buf = 142 iser_task->data_copy[cmd_dir].buf =
143 &iser_ctask->data_copy[cmd_dir].sg_single; 143 &iser_task->data_copy[cmd_dir].sg_single;
144 iser_ctask->data_copy[cmd_dir].size = 1; 144 iser_task->data_copy[cmd_dir].size = 1;
145 145
146 iser_ctask->data_copy[cmd_dir].copy_buf = mem; 146 iser_task->data_copy[cmd_dir].copy_buf = mem;
147 147
148 dev = iser_ctask->iser_conn->ib_conn->device->ib_device; 148 dev = iser_task->iser_conn->ib_conn->device->ib_device;
149 dma_nents = ib_dma_map_sg(dev, 149 dma_nents = ib_dma_map_sg(dev,
150 &iser_ctask->data_copy[cmd_dir].sg_single, 150 &iser_task->data_copy[cmd_dir].sg_single,
151 1, 151 1,
152 (cmd_dir == ISER_DIR_OUT) ? 152 (cmd_dir == ISER_DIR_OUT) ?
153 DMA_TO_DEVICE : DMA_FROM_DEVICE); 153 DMA_TO_DEVICE : DMA_FROM_DEVICE);
154 BUG_ON(dma_nents == 0); 154 BUG_ON(dma_nents == 0);
155 155
156 iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents; 156 iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
157 return 0; 157 return 0;
158} 158}
159 159
160/** 160/**
161 * iser_finalize_rdma_unaligned_sg 161 * iser_finalize_rdma_unaligned_sg
162 */ 162 */
163void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, 163void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
164 enum iser_data_dir cmd_dir) 164 enum iser_data_dir cmd_dir)
165{ 165{
166 struct ib_device *dev; 166 struct ib_device *dev;
167 struct iser_data_buf *mem_copy; 167 struct iser_data_buf *mem_copy;
168 unsigned long cmd_data_len; 168 unsigned long cmd_data_len;
169 169
170 dev = iser_ctask->iser_conn->ib_conn->device->ib_device; 170 dev = iser_task->iser_conn->ib_conn->device->ib_device;
171 mem_copy = &iser_ctask->data_copy[cmd_dir]; 171 mem_copy = &iser_task->data_copy[cmd_dir];
172 172
173 ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1, 173 ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
174 (cmd_dir == ISER_DIR_OUT) ? 174 (cmd_dir == ISER_DIR_OUT) ?
@@ -184,8 +184,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
184 /* copy back read RDMA to unaligned sg */ 184 /* copy back read RDMA to unaligned sg */
185 mem = mem_copy->copy_buf; 185 mem = mem_copy->copy_buf;
186 186
187 sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf; 187 sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
188 sg_size = iser_ctask->data[ISER_DIR_IN].size; 188 sg_size = iser_task->data[ISER_DIR_IN].size;
189 189
190 p = mem; 190 p = mem;
191 for_each_sg(sgl, sg, sg_size, i) { 191 for_each_sg(sgl, sg, sg_size, i) {
@@ -198,7 +198,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
198 } 198 }
199 } 199 }
200 200
201 cmd_data_len = iser_ctask->data[cmd_dir].data_len; 201 cmd_data_len = iser_task->data[cmd_dir].data_len;
202 202
203 if (cmd_data_len > ISER_KMALLOC_THRESHOLD) 203 if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
204 free_pages((unsigned long)mem_copy->copy_buf, 204 free_pages((unsigned long)mem_copy->copy_buf,
@@ -376,15 +376,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
376 } 376 }
377} 377}
378 378
379int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask, 379int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
380 struct iser_data_buf *data, 380 struct iser_data_buf *data,
381 enum iser_data_dir iser_dir, 381 enum iser_data_dir iser_dir,
382 enum dma_data_direction dma_dir) 382 enum dma_data_direction dma_dir)
383{ 383{
384 struct ib_device *dev; 384 struct ib_device *dev;
385 385
386 iser_ctask->dir[iser_dir] = 1; 386 iser_task->dir[iser_dir] = 1;
387 dev = iser_ctask->iser_conn->ib_conn->device->ib_device; 387 dev = iser_task->iser_conn->ib_conn->device->ib_device;
388 388
389 data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir); 389 data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
390 if (data->dma_nents == 0) { 390 if (data->dma_nents == 0) {
@@ -394,20 +394,20 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
394 return 0; 394 return 0;
395} 395}
396 396
397void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask) 397void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
398{ 398{
399 struct ib_device *dev; 399 struct ib_device *dev;
400 struct iser_data_buf *data; 400 struct iser_data_buf *data;
401 401
402 dev = iser_ctask->iser_conn->ib_conn->device->ib_device; 402 dev = iser_task->iser_conn->ib_conn->device->ib_device;
403 403
404 if (iser_ctask->dir[ISER_DIR_IN]) { 404 if (iser_task->dir[ISER_DIR_IN]) {
405 data = &iser_ctask->data[ISER_DIR_IN]; 405 data = &iser_task->data[ISER_DIR_IN];
406 ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); 406 ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
407 } 407 }
408 408
409 if (iser_ctask->dir[ISER_DIR_OUT]) { 409 if (iser_task->dir[ISER_DIR_OUT]) {
410 data = &iser_ctask->data[ISER_DIR_OUT]; 410 data = &iser_task->data[ISER_DIR_OUT];
411 ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE); 411 ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
412 } 412 }
413} 413}
@@ -418,21 +418,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
418 * 418 *
419 * returns 0 on success, errno code on failure 419 * returns 0 on success, errno code on failure
420 */ 420 */
421int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, 421int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
422 enum iser_data_dir cmd_dir) 422 enum iser_data_dir cmd_dir)
423{ 423{
424 struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn; 424 struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
425 struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn; 425 struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
426 struct iser_device *device = ib_conn->device; 426 struct iser_device *device = ib_conn->device;
427 struct ib_device *ibdev = device->ib_device; 427 struct ib_device *ibdev = device->ib_device;
428 struct iser_data_buf *mem = &iser_ctask->data[cmd_dir]; 428 struct iser_data_buf *mem = &iser_task->data[cmd_dir];
429 struct iser_regd_buf *regd_buf; 429 struct iser_regd_buf *regd_buf;
430 int aligned_len; 430 int aligned_len;
431 int err; 431 int err;
432 int i; 432 int i;
433 struct scatterlist *sg; 433 struct scatterlist *sg;
434 434
435 regd_buf = &iser_ctask->rdma_regd[cmd_dir]; 435 regd_buf = &iser_task->rdma_regd[cmd_dir];
436 436
437 aligned_len = iser_data_buf_aligned_len(mem, ibdev); 437 aligned_len = iser_data_buf_aligned_len(mem, ibdev);
438 if (aligned_len != mem->dma_nents) { 438 if (aligned_len != mem->dma_nents) {
@@ -442,13 +442,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
442 iser_data_buf_dump(mem, ibdev); 442 iser_data_buf_dump(mem, ibdev);
443 443
444 /* unmap the command data before accessing it */ 444 /* unmap the command data before accessing it */
445 iser_dma_unmap_task_data(iser_ctask); 445 iser_dma_unmap_task_data(iser_task);
446 446
447 /* allocate copy buf, if we are writing, copy the */ 447 /* allocate copy buf, if we are writing, copy the */
448 /* unaligned scatterlist, dma map the copy */ 448 /* unaligned scatterlist, dma map the copy */
449 if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0) 449 if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
450 return -ENOMEM; 450 return -ENOMEM;
451 mem = &iser_ctask->data_copy[cmd_dir]; 451 mem = &iser_task->data_copy[cmd_dir];
452 } 452 }
453 453
454 /* if there a single dma entry, FMR is not needed */ 454 /* if there a single dma entry, FMR is not needed */
@@ -472,8 +472,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
472 err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg); 472 err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
473 if (err) { 473 if (err) {
474 iser_data_buf_dump(mem, ibdev); 474 iser_data_buf_dump(mem, ibdev);
475 iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents, 475 iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
476 ntoh24(iser_ctask->desc.iscsi_header.dlength)); 476 mem->dma_nents,
477 ntoh24(iser_task->desc.iscsi_header.dlength));
477 iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", 478 iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
478 ib_conn->page_vec->data_size, ib_conn->page_vec->length, 479 ib_conn->page_vec->data_size, ib_conn->page_vec->length,
479 ib_conn->page_vec->offset); 480 ib_conn->page_vec->offset);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 77cabee7cc08..3a917c1f796f 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -323,7 +323,18 @@ static void iser_conn_release(struct iser_conn *ib_conn)
323 iser_device_try_release(device); 323 iser_device_try_release(device);
324 if (ib_conn->iser_conn) 324 if (ib_conn->iser_conn)
325 ib_conn->iser_conn->ib_conn = NULL; 325 ib_conn->iser_conn->ib_conn = NULL;
326 kfree(ib_conn); 326 iscsi_destroy_endpoint(ib_conn->ep);
327}
328
329void iser_conn_get(struct iser_conn *ib_conn)
330{
331 atomic_inc(&ib_conn->refcount);
332}
333
334void iser_conn_put(struct iser_conn *ib_conn)
335{
336 if (atomic_dec_and_test(&ib_conn->refcount))
337 iser_conn_release(ib_conn);
327} 338}
328 339
329/** 340/**
@@ -347,7 +358,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
347 wait_event_interruptible(ib_conn->wait, 358 wait_event_interruptible(ib_conn->wait,
348 ib_conn->state == ISER_CONN_DOWN); 359 ib_conn->state == ISER_CONN_DOWN);
349 360
350 iser_conn_release(ib_conn); 361 iser_conn_put(ib_conn);
351} 362}
352 363
353static void iser_connect_error(struct rdma_cm_id *cma_id) 364static void iser_connect_error(struct rdma_cm_id *cma_id)
@@ -481,24 +492,15 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
481 return ret; 492 return ret;
482} 493}
483 494
484int iser_conn_init(struct iser_conn **ibconn) 495void iser_conn_init(struct iser_conn *ib_conn)
485{ 496{
486 struct iser_conn *ib_conn;
487
488 ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
489 if (!ib_conn) {
490 iser_err("can't alloc memory for struct iser_conn\n");
491 return -ENOMEM;
492 }
493 ib_conn->state = ISER_CONN_INIT; 497 ib_conn->state = ISER_CONN_INIT;
494 init_waitqueue_head(&ib_conn->wait); 498 init_waitqueue_head(&ib_conn->wait);
495 atomic_set(&ib_conn->post_recv_buf_count, 0); 499 atomic_set(&ib_conn->post_recv_buf_count, 0);
496 atomic_set(&ib_conn->post_send_buf_count, 0); 500 atomic_set(&ib_conn->post_send_buf_count, 0);
501 atomic_set(&ib_conn->refcount, 1);
497 INIT_LIST_HEAD(&ib_conn->conn_list); 502 INIT_LIST_HEAD(&ib_conn->conn_list);
498 spin_lock_init(&ib_conn->lock); 503 spin_lock_init(&ib_conn->lock);
499
500 *ibconn = ib_conn;
501 return 0;
502} 504}
503 505
504 /** 506 /**
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 40c70ba62bf0..e5d446804d32 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -46,7 +46,6 @@
46#endif 46#endif
47 47
48 48
49EXPORT_SYMBOL(adb_controller);
50EXPORT_SYMBOL(adb_client_list); 49EXPORT_SYMBOL(adb_client_list);
51 50
52extern struct adb_driver via_macii_driver; 51extern struct adb_driver via_macii_driver;
@@ -80,7 +79,7 @@ static struct adb_driver *adb_driver_list[] = {
80 79
81static struct class *adb_dev_class; 80static struct class *adb_dev_class;
82 81
83struct adb_driver *adb_controller; 82static struct adb_driver *adb_controller;
84BLOCKING_NOTIFIER_HEAD(adb_client_list); 83BLOCKING_NOTIFIER_HEAD(adb_client_list);
85static int adb_got_sleep; 84static int adb_got_sleep;
86static int adb_inited; 85static int adb_inited;
@@ -290,7 +289,7 @@ static int adb_resume(struct platform_device *dev)
290} 289}
291#endif /* CONFIG_PM */ 290#endif /* CONFIG_PM */
292 291
293int __init adb_init(void) 292static int __init adb_init(void)
294{ 293{
295 struct adb_driver *driver; 294 struct adb_driver *driver;
296 int i; 295 int i;
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index ef4c117ea35f..59ea520a5d7a 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -75,7 +75,7 @@ static struct notifier_block adbhid_adb_notifier = {
75#define ADB_KEY_POWER_OLD 0x7e 75#define ADB_KEY_POWER_OLD 0x7e
76#define ADB_KEY_POWER 0x7f 76#define ADB_KEY_POWER 0x7f
77 77
78u16 adb_to_linux_keycodes[128] = { 78static const u16 adb_to_linux_keycodes[128] = {
79 /* 0x00 */ KEY_A, /* 30 */ 79 /* 0x00 */ KEY_A, /* 30 */
80 /* 0x01 */ KEY_S, /* 31 */ 80 /* 0x01 */ KEY_S, /* 31 */
81 /* 0x02 */ KEY_D, /* 32 */ 81 /* 0x02 */ KEY_D, /* 32 */
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index 112e5ef728f1..9e9453b58425 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -44,7 +44,7 @@ static ssize_t modalias_show (struct device *dev, struct device_attribute *attr,
44 struct of_device *ofdev = to_of_device(dev); 44 struct of_device *ofdev = to_of_device(dev);
45 int len; 45 int len;
46 46
47 len = of_device_get_modalias(ofdev, buf, PAGE_SIZE); 47 len = of_device_get_modalias(ofdev, buf, PAGE_SIZE - 2);
48 48
49 buf[len] = '\n'; 49 buf[len] = '\n';
50 buf[len+1] = 0; 50 buf[len+1] = 0;
@@ -52,6 +52,15 @@ static ssize_t modalias_show (struct device *dev, struct device_attribute *attr,
52 return len+1; 52 return len+1;
53} 53}
54 54
55static ssize_t devspec_show(struct device *dev,
56 struct device_attribute *attr, char *buf)
57{
58 struct of_device *ofdev;
59
60 ofdev = to_of_device(dev);
61 return sprintf(buf, "%s\n", ofdev->node->full_name);
62}
63
55macio_config_of_attr (name, "%s\n"); 64macio_config_of_attr (name, "%s\n");
56macio_config_of_attr (type, "%s\n"); 65macio_config_of_attr (type, "%s\n");
57 66
@@ -60,5 +69,6 @@ struct device_attribute macio_dev_attrs[] = {
60 __ATTR_RO(type), 69 __ATTR_RO(type),
61 __ATTR_RO(compatible), 70 __ATTR_RO(compatible),
62 __ATTR_RO(modalias), 71 __ATTR_RO(modalias),
72 __ATTR_RO(devspec),
63 __ATTR_NULL 73 __ATTR_NULL
64}; 74};
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 818aba368541..b1e5b4705250 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/ide.h> 22#include <linux/ide.h>
23#include <linux/kthread.h> 23#include <linux/kthread.h>
24#include <linux/mutex.h>
24#include <asm/prom.h> 25#include <asm/prom.h>
25#include <asm/pgtable.h> 26#include <asm/pgtable.h>
26#include <asm/io.h> 27#include <asm/io.h>
@@ -77,7 +78,7 @@ struct media_bay_info {
77 int index; 78 int index;
78 int cached_gpio; 79 int cached_gpio;
79 int sleeping; 80 int sleeping;
80 struct semaphore lock; 81 struct mutex lock;
81#ifdef CONFIG_BLK_DEV_IDE_PMAC 82#ifdef CONFIG_BLK_DEV_IDE_PMAC
82 ide_hwif_t *cd_port; 83 ide_hwif_t *cd_port;
83 void __iomem *cd_base; 84 void __iomem *cd_base;
@@ -459,27 +460,27 @@ int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base,
459 if (bay->mdev && which_bay == bay->mdev->ofdev.node) { 460 if (bay->mdev && which_bay == bay->mdev->ofdev.node) {
460 int timeout = 5000, index = hwif->index; 461 int timeout = 5000, index = hwif->index;
461 462
462 down(&bay->lock); 463 mutex_lock(&bay->lock);
463 464
464 bay->cd_port = hwif; 465 bay->cd_port = hwif;
465 bay->cd_base = (void __iomem *) base; 466 bay->cd_base = (void __iomem *) base;
466 bay->cd_irq = irq; 467 bay->cd_irq = irq;
467 468
468 if ((MB_CD != bay->content_id) || bay->state != mb_up) { 469 if ((MB_CD != bay->content_id) || bay->state != mb_up) {
469 up(&bay->lock); 470 mutex_unlock(&bay->lock);
470 return 0; 471 return 0;
471 } 472 }
472 printk(KERN_DEBUG "Registered ide%d for media bay %d\n", index, i); 473 printk(KERN_DEBUG "Registered ide%d for media bay %d\n", index, i);
473 do { 474 do {
474 if (MB_IDE_READY(i)) { 475 if (MB_IDE_READY(i)) {
475 bay->cd_index = index; 476 bay->cd_index = index;
476 up(&bay->lock); 477 mutex_unlock(&bay->lock);
477 return 0; 478 return 0;
478 } 479 }
479 mdelay(1); 480 mdelay(1);
480 } while(--timeout); 481 } while(--timeout);
481 printk(KERN_DEBUG "Timeount waiting IDE in bay %d\n", i); 482 printk(KERN_DEBUG "Timeount waiting IDE in bay %d\n", i);
482 up(&bay->lock); 483 mutex_unlock(&bay->lock);
483 return -ENODEV; 484 return -ENODEV;
484 } 485 }
485 } 486 }
@@ -617,10 +618,10 @@ static int media_bay_task(void *x)
617 618
618 while (!kthread_should_stop()) { 619 while (!kthread_should_stop()) {
619 for (i = 0; i < media_bay_count; ++i) { 620 for (i = 0; i < media_bay_count; ++i) {
620 down(&media_bays[i].lock); 621 mutex_lock(&media_bays[i].lock);
621 if (!media_bays[i].sleeping) 622 if (!media_bays[i].sleeping)
622 media_bay_step(i); 623 media_bay_step(i);
623 up(&media_bays[i].lock); 624 mutex_unlock(&media_bays[i].lock);
624 } 625 }
625 626
626 msleep_interruptible(MB_POLL_DELAY); 627 msleep_interruptible(MB_POLL_DELAY);
@@ -660,7 +661,7 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de
660 bay->index = i; 661 bay->index = i;
661 bay->ops = match->data; 662 bay->ops = match->data;
662 bay->sleeping = 0; 663 bay->sleeping = 0;
663 init_MUTEX(&bay->lock); 664 mutex_init(&bay->lock);
664 665
665 /* Init HW probing */ 666 /* Init HW probing */
666 if (bay->ops->init) 667 if (bay->ops->init)
@@ -698,10 +699,10 @@ static int media_bay_suspend(struct macio_dev *mdev, pm_message_t state)
698 699
699 if (state.event != mdev->ofdev.dev.power.power_state.event 700 if (state.event != mdev->ofdev.dev.power.power_state.event
700 && (state.event & PM_EVENT_SLEEP)) { 701 && (state.event & PM_EVENT_SLEEP)) {
701 down(&bay->lock); 702 mutex_lock(&bay->lock);
702 bay->sleeping = 1; 703 bay->sleeping = 1;
703 set_mb_power(bay, 0); 704 set_mb_power(bay, 0);
704 up(&bay->lock); 705 mutex_unlock(&bay->lock);
705 msleep(MB_POLL_DELAY); 706 msleep(MB_POLL_DELAY);
706 mdev->ofdev.dev.power.power_state = state; 707 mdev->ofdev.dev.power.power_state = state;
707 } 708 }
@@ -720,12 +721,12 @@ static int media_bay_resume(struct macio_dev *mdev)
720 they seem to help the 3400 get it right. 721 they seem to help the 3400 get it right.
721 */ 722 */
722 /* Force MB power to 0 */ 723 /* Force MB power to 0 */
723 down(&bay->lock); 724 mutex_lock(&bay->lock);
724 set_mb_power(bay, 0); 725 set_mb_power(bay, 0);
725 msleep(MB_POWER_DELAY); 726 msleep(MB_POWER_DELAY);
726 if (bay->ops->content(bay) != bay->content_id) { 727 if (bay->ops->content(bay) != bay->content_id) {
727 printk("mediabay%d: content changed during sleep...\n", bay->index); 728 printk("mediabay%d: content changed during sleep...\n", bay->index);
728 up(&bay->lock); 729 mutex_unlock(&bay->lock);
729 return 0; 730 return 0;
730 } 731 }
731 set_mb_power(bay, 1); 732 set_mb_power(bay, 1);
@@ -741,7 +742,7 @@ static int media_bay_resume(struct macio_dev *mdev)
741 } while((bay->state != mb_empty) && 742 } while((bay->state != mb_empty) &&
742 (bay->state != mb_up)); 743 (bay->state != mb_up));
743 bay->sleeping = 0; 744 bay->sleeping = 0;
744 up(&bay->lock); 745 mutex_unlock(&bay->lock);
745 } 746 }
746 return 0; 747 return 0;
747} 748}
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 32cb0298f88e..96faa799b82a 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -36,6 +36,8 @@
36#include <linux/sysdev.h> 36#include <linux/sysdev.h>
37#include <linux/poll.h> 37#include <linux/poll.h>
38#include <linux/mutex.h> 38#include <linux/mutex.h>
39#include <linux/of_device.h>
40#include <linux/of_platform.h>
39 41
40#include <asm/byteorder.h> 42#include <asm/byteorder.h>
41#include <asm/io.h> 43#include <asm/io.h>
@@ -46,8 +48,6 @@
46#include <asm/sections.h> 48#include <asm/sections.h>
47#include <asm/abs_addr.h> 49#include <asm/abs_addr.h>
48#include <asm/uaccess.h> 50#include <asm/uaccess.h>
49#include <asm/of_device.h>
50#include <asm/of_platform.h>
51 51
52#define VERSION "0.7" 52#define VERSION "0.7"
53#define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp." 53#define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp."
@@ -475,6 +475,7 @@ int __init smu_init (void)
475{ 475{
476 struct device_node *np; 476 struct device_node *np;
477 const u32 *data; 477 const u32 *data;
478 int ret = 0;
478 479
479 np = of_find_node_by_type(NULL, "smu"); 480 np = of_find_node_by_type(NULL, "smu");
480 if (np == NULL) 481 if (np == NULL)
@@ -484,16 +485,11 @@ int __init smu_init (void)
484 485
485 if (smu_cmdbuf_abs == 0) { 486 if (smu_cmdbuf_abs == 0) {
486 printk(KERN_ERR "SMU: Command buffer not allocated !\n"); 487 printk(KERN_ERR "SMU: Command buffer not allocated !\n");
487 of_node_put(np); 488 ret = -EINVAL;
488 return -EINVAL; 489 goto fail_np;
489 } 490 }
490 491
491 smu = alloc_bootmem(sizeof(struct smu_device)); 492 smu = alloc_bootmem(sizeof(struct smu_device));
492 if (smu == NULL) {
493 of_node_put(np);
494 return -ENOMEM;
495 }
496 memset(smu, 0, sizeof(*smu));
497 493
498 spin_lock_init(&smu->lock); 494 spin_lock_init(&smu->lock);
499 INIT_LIST_HEAD(&smu->cmd_list); 495 INIT_LIST_HEAD(&smu->cmd_list);
@@ -511,14 +507,14 @@ int __init smu_init (void)
511 smu->db_node = of_find_node_by_name(NULL, "smu-doorbell"); 507 smu->db_node = of_find_node_by_name(NULL, "smu-doorbell");
512 if (smu->db_node == NULL) { 508 if (smu->db_node == NULL) {
513 printk(KERN_ERR "SMU: Can't find doorbell GPIO !\n"); 509 printk(KERN_ERR "SMU: Can't find doorbell GPIO !\n");
514 goto fail; 510 ret = -ENXIO;
511 goto fail_bootmem;
515 } 512 }
516 data = of_get_property(smu->db_node, "reg", NULL); 513 data = of_get_property(smu->db_node, "reg", NULL);
517 if (data == NULL) { 514 if (data == NULL) {
518 of_node_put(smu->db_node);
519 smu->db_node = NULL;
520 printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n"); 515 printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n");
521 goto fail; 516 ret = -ENXIO;
517 goto fail_db_node;
522 } 518 }
523 519
524 /* Current setup has one doorbell GPIO that does both doorbell 520 /* Current setup has one doorbell GPIO that does both doorbell
@@ -552,7 +548,8 @@ int __init smu_init (void)
552 smu->db_buf = ioremap(0x8000860c, 0x1000); 548 smu->db_buf = ioremap(0x8000860c, 0x1000);
553 if (smu->db_buf == NULL) { 549 if (smu->db_buf == NULL) {
554 printk(KERN_ERR "SMU: Can't map doorbell buffer pointer !\n"); 550 printk(KERN_ERR "SMU: Can't map doorbell buffer pointer !\n");
555 goto fail; 551 ret = -ENXIO;
552 goto fail_msg_node;
556 } 553 }
557 554
558 /* U3 has an issue with NAP mode when issuing SMU commands */ 555 /* U3 has an issue with NAP mode when issuing SMU commands */
@@ -563,10 +560,17 @@ int __init smu_init (void)
563 sys_ctrler = SYS_CTRLER_SMU; 560 sys_ctrler = SYS_CTRLER_SMU;
564 return 0; 561 return 0;
565 562
566 fail: 563fail_msg_node:
564 if (smu->msg_node)
565 of_node_put(smu->msg_node);
566fail_db_node:
567 of_node_put(smu->db_node);
568fail_bootmem:
569 free_bootmem((unsigned long)smu, sizeof(struct smu_device));
567 smu = NULL; 570 smu = NULL;
568 return -ENXIO; 571fail_np:
569 572 of_node_put(np);
573 return ret;
570} 574}
571 575
572 576
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 5366dc93fb38..22bf981d393b 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -24,13 +24,13 @@
24#include <linux/kthread.h> 24#include <linux/kthread.h>
25#include <linux/moduleparam.h> 25#include <linux/moduleparam.h>
26#include <linux/freezer.h> 26#include <linux/freezer.h>
27#include <linux/of_platform.h>
27 28
28#include <asm/prom.h> 29#include <asm/prom.h>
29#include <asm/machdep.h> 30#include <asm/machdep.h>
30#include <asm/io.h> 31#include <asm/io.h>
31#include <asm/system.h> 32#include <asm/system.h>
32#include <asm/sections.h> 33#include <asm/sections.h>
33#include <asm/of_platform.h>
34 34
35#undef DEBUG 35#undef DEBUG
36 36
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index ddfb426a9abd..817607e2af6a 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -123,14 +123,14 @@
123#include <linux/i2c.h> 123#include <linux/i2c.h>
124#include <linux/kthread.h> 124#include <linux/kthread.h>
125#include <linux/mutex.h> 125#include <linux/mutex.h>
126#include <linux/of_device.h>
127#include <linux/of_platform.h>
126#include <asm/prom.h> 128#include <asm/prom.h>
127#include <asm/machdep.h> 129#include <asm/machdep.h>
128#include <asm/io.h> 130#include <asm/io.h>
129#include <asm/system.h> 131#include <asm/system.h>
130#include <asm/sections.h> 132#include <asm/sections.h>
131#include <asm/of_device.h>
132#include <asm/macio.h> 133#include <asm/macio.h>
133#include <asm/of_platform.h>
134 134
135#include "therm_pm72.h" 135#include "therm_pm72.h"
136 136
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index d11821af3b8d..3da0a02efd76 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -37,13 +37,13 @@
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/kthread.h> 39#include <linux/kthread.h>
40#include <linux/of_platform.h>
40 41
41#include <asm/prom.h> 42#include <asm/prom.h>
42#include <asm/machdep.h> 43#include <asm/machdep.h>
43#include <asm/io.h> 44#include <asm/io.h>
44#include <asm/system.h> 45#include <asm/system.h>
45#include <asm/sections.h> 46#include <asm/sections.h>
46#include <asm/of_platform.h>
47#include <asm/macio.h> 47#include <asm/macio.h>
48 48
49#define LOG_TEMP 0 /* continously log temperature */ 49#define LOG_TEMP 0 /* continously log temperature */
@@ -62,7 +62,7 @@ static struct {
62 volatile int running; 62 volatile int running;
63 struct task_struct *poll_task; 63 struct task_struct *poll_task;
64 64
65 struct semaphore lock; 65 struct mutex lock;
66 struct of_device *of_dev; 66 struct of_device *of_dev;
67 67
68 struct i2c_client *thermostat; 68 struct i2c_client *thermostat;
@@ -286,23 +286,23 @@ restore_regs( void )
286 286
287static int control_loop(void *dummy) 287static int control_loop(void *dummy)
288{ 288{
289 down(&x.lock); 289 mutex_lock(&x.lock);
290 setup_hardware(); 290 setup_hardware();
291 up(&x.lock); 291 mutex_unlock(&x.lock);
292 292
293 for (;;) { 293 for (;;) {
294 msleep_interruptible(8000); 294 msleep_interruptible(8000);
295 if (kthread_should_stop()) 295 if (kthread_should_stop())
296 break; 296 break;
297 297
298 down(&x.lock); 298 mutex_lock(&x.lock);
299 poll_temp(); 299 poll_temp();
300 up(&x.lock); 300 mutex_unlock(&x.lock);
301 } 301 }
302 302
303 down(&x.lock); 303 mutex_lock(&x.lock);
304 restore_regs(); 304 restore_regs();
305 up(&x.lock); 305 mutex_unlock(&x.lock);
306 306
307 return 0; 307 return 0;
308} 308}
@@ -489,7 +489,7 @@ g4fan_init( void )
489 const struct apple_thermal_info *info; 489 const struct apple_thermal_info *info;
490 struct device_node *np; 490 struct device_node *np;
491 491
492 init_MUTEX( &x.lock ); 492 mutex_init(&x.lock);
493 493
494 if( !(np=of_find_node_by_name(NULL, "power-mgt")) ) 494 if( !(np=of_find_node_by_name(NULL, "power-mgt")) )
495 return -ENODEV; 495 return -ENODEV;
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
index e2f84da09e7c..b64741c95ac4 100644
--- a/drivers/macintosh/via-pmu68k.c
+++ b/drivers/macintosh/via-pmu68k.c
@@ -101,7 +101,6 @@ static int pmu_kind = PMU_UNKNOWN;
101static int pmu_fully_inited; 101static int pmu_fully_inited;
102 102
103int asleep; 103int asleep;
104BLOCKING_NOTIFIER_HEAD(sleep_notifier_list);
105 104
106static int pmu_probe(void); 105static int pmu_probe(void);
107static int pmu_init(void); 106static int pmu_init(void);
@@ -741,8 +740,8 @@ pmu_handle_data(unsigned char *data, int len)
741 } 740 }
742} 741}
743 742
744int backlight_level = -1; 743static int backlight_level = -1;
745int backlight_enabled = 0; 744static int backlight_enabled = 0;
746 745
747#define LEVEL_TO_BRIGHT(lev) ((lev) < 1? 0x7f: 0x4a - ((lev) << 1)) 746#define LEVEL_TO_BRIGHT(lev) ((lev) < 1? 0x7f: 0x4a - ((lev) << 1))
748 747
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 610af916891e..07d92c11b5d8 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -252,27 +252,14 @@ config DM_ZERO
252config DM_MULTIPATH 252config DM_MULTIPATH
253 tristate "Multipath target" 253 tristate "Multipath target"
254 depends on BLK_DEV_DM 254 depends on BLK_DEV_DM
255 # nasty syntax but means make DM_MULTIPATH independent
256 # of SCSI_DH if the latter isn't defined but if
257 # it is, DM_MULTIPATH must depend on it. We get a build
258 # error if SCSI_DH=m and DM_MULTIPATH=y
259 depends on SCSI_DH || !SCSI_DH
255 ---help--- 260 ---help---
256 Allow volume managers to support multipath hardware. 261 Allow volume managers to support multipath hardware.
257 262
258config DM_MULTIPATH_EMC
259 tristate "EMC CX/AX multipath support"
260 depends on DM_MULTIPATH && BLK_DEV_DM
261 ---help---
262 Multipath support for EMC CX/AX series hardware.
263
264config DM_MULTIPATH_RDAC
265 tristate "LSI/Engenio RDAC multipath support (EXPERIMENTAL)"
266 depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
267 ---help---
268 Multipath support for LSI/Engenio RDAC.
269
270config DM_MULTIPATH_HP
271 tristate "HP MSA multipath support (EXPERIMENTAL)"
272 depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
273 ---help---
274 Multipath support for HP MSA (Active/Passive) series hardware.
275
276config DM_DELAY 263config DM_DELAY
277 tristate "I/O delaying target (EXPERIMENTAL)" 264 tristate "I/O delaying target (EXPERIMENTAL)"
278 depends on BLK_DEV_DM && EXPERIMENTAL 265 depends on BLK_DEV_DM && EXPERIMENTAL
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 7be09eeea293..f1ef33dfd8cf 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -4,11 +4,9 @@
4 4
5dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \ 5dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
6 dm-ioctl.o dm-io.o dm-kcopyd.o 6 dm-ioctl.o dm-io.o dm-kcopyd.o
7dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o 7dm-multipath-objs := dm-path-selector.o dm-mpath.o
8dm-snapshot-objs := dm-snap.o dm-exception-store.o 8dm-snapshot-objs := dm-snap.o dm-exception-store.o
9dm-mirror-objs := dm-raid1.o 9dm-mirror-objs := dm-raid1.o
10dm-rdac-objs := dm-mpath-rdac.o
11dm-hp-sw-objs := dm-mpath-hp-sw.o
12md-mod-objs := md.o bitmap.o 10md-mod-objs := md.o bitmap.o
13raid456-objs := raid5.o raid6algos.o raid6recov.o raid6tables.o \ 11raid456-objs := raid5.o raid6algos.o raid6recov.o raid6tables.o \
14 raid6int1.o raid6int2.o raid6int4.o \ 12 raid6int1.o raid6int2.o raid6int4.o \
@@ -35,9 +33,6 @@ obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
35obj-$(CONFIG_DM_CRYPT) += dm-crypt.o 33obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
36obj-$(CONFIG_DM_DELAY) += dm-delay.o 34obj-$(CONFIG_DM_DELAY) += dm-delay.o
37obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o 35obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
38obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o
39obj-$(CONFIG_DM_MULTIPATH_HP) += dm-hp-sw.o
40obj-$(CONFIG_DM_MULTIPATH_RDAC) += dm-rdac.o
41obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o 36obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
42obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o 37obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o
43obj-$(CONFIG_DM_ZERO) += dm-zero.o 38obj-$(CONFIG_DM_ZERO) += dm-zero.o
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
deleted file mode 100644
index 3ea5ad4b7805..000000000000
--- a/drivers/md/dm-emc.c
+++ /dev/null
@@ -1,345 +0,0 @@
1/*
2 * Copyright (C) 2004 SUSE LINUX Products GmbH. All rights reserved.
3 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 *
7 * Multipath support for EMC CLARiiON AX/CX-series hardware.
8 */
9
10#include "dm.h"
11#include "dm-hw-handler.h"
12#include <scsi/scsi.h>
13#include <scsi/scsi_cmnd.h>
14
15#define DM_MSG_PREFIX "multipath emc"
16
17struct emc_handler {
18 spinlock_t lock;
19
20 /* Whether we should send the short trespass command (FC-series)
21 * or the long version (default for AX/CX CLARiiON arrays). */
22 unsigned short_trespass;
23 /* Whether or not to honor SCSI reservations when initiating a
24 * switch-over. Default: Don't. */
25 unsigned hr;
26
27 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
28};
29
30#define TRESPASS_PAGE 0x22
31#define EMC_FAILOVER_TIMEOUT (60 * HZ)
32
33/* Code borrowed from dm-lsi-rdac by Mike Christie */
34
35static inline void free_bio(struct bio *bio)
36{
37 __free_page(bio->bi_io_vec[0].bv_page);
38 bio_put(bio);
39}
40
41static void emc_endio(struct bio *bio, int error)
42{
43 struct dm_path *path = bio->bi_private;
44
45 /* We also need to look at the sense keys here whether or not to
46 * switch to the next PG etc.
47 *
48 * For now simple logic: either it works or it doesn't.
49 */
50 if (error)
51 dm_pg_init_complete(path, MP_FAIL_PATH);
52 else
53 dm_pg_init_complete(path, 0);
54
55 /* request is freed in block layer */
56 free_bio(bio);
57}
58
59static struct bio *get_failover_bio(struct dm_path *path, unsigned data_size)
60{
61 struct bio *bio;
62 struct page *page;
63
64 bio = bio_alloc(GFP_ATOMIC, 1);
65 if (!bio) {
66 DMERR("get_failover_bio: bio_alloc() failed.");
67 return NULL;
68 }
69
70 bio->bi_rw |= (1 << BIO_RW);
71 bio->bi_bdev = path->dev->bdev;
72 bio->bi_sector = 0;
73 bio->bi_private = path;
74 bio->bi_end_io = emc_endio;
75
76 page = alloc_page(GFP_ATOMIC);
77 if (!page) {
78 DMERR("get_failover_bio: alloc_page() failed.");
79 bio_put(bio);
80 return NULL;
81 }
82
83 if (bio_add_page(bio, page, data_size, 0) != data_size) {
84 DMERR("get_failover_bio: bio_add_page() failed.");
85 __free_page(page);
86 bio_put(bio);
87 return NULL;
88 }
89
90 return bio;
91}
92
93static struct request *get_failover_req(struct emc_handler *h,
94 struct bio *bio, struct dm_path *path)
95{
96 struct request *rq;
97 struct block_device *bdev = bio->bi_bdev;
98 struct request_queue *q = bdev_get_queue(bdev);
99
100 /* FIXME: Figure out why it fails with GFP_ATOMIC. */
101 rq = blk_get_request(q, WRITE, __GFP_WAIT);
102 if (!rq) {
103 DMERR("get_failover_req: blk_get_request failed");
104 return NULL;
105 }
106
107 blk_rq_append_bio(q, rq, bio);
108
109 rq->sense = h->sense;
110 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
111 rq->sense_len = 0;
112
113 rq->timeout = EMC_FAILOVER_TIMEOUT;
114 rq->cmd_type = REQ_TYPE_BLOCK_PC;
115 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
116
117 return rq;
118}
119
120static struct request *emc_trespass_get(struct emc_handler *h,
121 struct dm_path *path)
122{
123 struct bio *bio;
124 struct request *rq;
125 unsigned char *page22;
126 unsigned char long_trespass_pg[] = {
127 0, 0, 0, 0,
128 TRESPASS_PAGE, /* Page code */
129 0x09, /* Page length - 2 */
130 h->hr ? 0x01 : 0x81, /* Trespass code + Honor reservation bit */
131 0xff, 0xff, /* Trespass target */
132 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
133 };
134 unsigned char short_trespass_pg[] = {
135 0, 0, 0, 0,
136 TRESPASS_PAGE, /* Page code */
137 0x02, /* Page length - 2 */
138 h->hr ? 0x01 : 0x81, /* Trespass code + Honor reservation bit */
139 0xff, /* Trespass target */
140 };
141 unsigned data_size = h->short_trespass ? sizeof(short_trespass_pg) :
142 sizeof(long_trespass_pg);
143
144 /* get bio backing */
145 if (data_size > PAGE_SIZE)
146 /* this should never happen */
147 return NULL;
148
149 bio = get_failover_bio(path, data_size);
150 if (!bio) {
151 DMERR("emc_trespass_get: no bio");
152 return NULL;
153 }
154
155 page22 = (unsigned char *)bio_data(bio);
156 memset(page22, 0, data_size);
157
158 memcpy(page22, h->short_trespass ?
159 short_trespass_pg : long_trespass_pg, data_size);
160
161 /* get request for block layer packet command */
162 rq = get_failover_req(h, bio, path);
163 if (!rq) {
164 DMERR("emc_trespass_get: no rq");
165 free_bio(bio);
166 return NULL;
167 }
168
169 /* Prepare the command. */
170 rq->cmd[0] = MODE_SELECT;
171 rq->cmd[1] = 0x10;
172 rq->cmd[4] = data_size;
173 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
174
175 return rq;
176}
177
178static void emc_pg_init(struct hw_handler *hwh, unsigned bypassed,
179 struct dm_path *path)
180{
181 struct request *rq;
182 struct request_queue *q = bdev_get_queue(path->dev->bdev);
183
184 /*
185 * We can either blindly init the pg (then look at the sense),
186 * or we can send some commands to get the state here (then
187 * possibly send the fo cmnd), or we can also have the
188 * initial state passed into us and then get an update here.
189 */
190 if (!q) {
191 DMINFO("emc_pg_init: no queue");
192 goto fail_path;
193 }
194
195 /* FIXME: The request should be pre-allocated. */
196 rq = emc_trespass_get(hwh->context, path);
197 if (!rq) {
198 DMERR("emc_pg_init: no rq");
199 goto fail_path;
200 }
201
202 DMINFO("emc_pg_init: sending switch-over command");
203 elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
204 return;
205
206fail_path:
207 dm_pg_init_complete(path, MP_FAIL_PATH);
208}
209
210static struct emc_handler *alloc_emc_handler(void)
211{
212 struct emc_handler *h = kzalloc(sizeof(*h), GFP_KERNEL);
213
214 if (h)
215 spin_lock_init(&h->lock);
216
217 return h;
218}
219
220static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv)
221{
222 struct emc_handler *h;
223 unsigned hr, short_trespass;
224
225 if (argc == 0) {
226 /* No arguments: use defaults */
227 hr = 0;
228 short_trespass = 0;
229 } else if (argc != 2) {
230 DMWARN("incorrect number of arguments");
231 return -EINVAL;
232 } else {
233 if ((sscanf(argv[0], "%u", &short_trespass) != 1)
234 || (short_trespass > 1)) {
235 DMWARN("invalid trespass mode selected");
236 return -EINVAL;
237 }
238
239 if ((sscanf(argv[1], "%u", &hr) != 1)
240 || (hr > 1)) {
241 DMWARN("invalid honor reservation flag selected");
242 return -EINVAL;
243 }
244 }
245
246 h = alloc_emc_handler();
247 if (!h)
248 return -ENOMEM;
249
250 hwh->context = h;
251
252 if ((h->short_trespass = short_trespass))
253 DMWARN("short trespass command will be send");
254 else
255 DMWARN("long trespass command will be send");
256
257 if ((h->hr = hr))
258 DMWARN("honor reservation bit will be set");
259 else
260 DMWARN("honor reservation bit will not be set (default)");
261
262 return 0;
263}
264
265static void emc_destroy(struct hw_handler *hwh)
266{
267 struct emc_handler *h = (struct emc_handler *) hwh->context;
268
269 kfree(h);
270 hwh->context = NULL;
271}
272
273static unsigned emc_error(struct hw_handler *hwh, struct bio *bio)
274{
275 /* FIXME: Patch from axboe still missing */
276#if 0
277 int sense;
278
279 if (bio->bi_error & BIO_SENSE) {
280 sense = bio->bi_error & 0xffffff; /* sense key / asc / ascq */
281
282 if (sense == 0x020403) {
283 /* LUN Not Ready - Manual Intervention Required
284 * indicates this is a passive path.
285 *
286 * FIXME: However, if this is seen and EVPD C0
287 * indicates that this is due to a NDU in
288 * progress, we should set FAIL_PATH too.
289 * This indicates we might have to do a SCSI
290 * inquiry in the end_io path. Ugh. */
291 return MP_BYPASS_PG | MP_RETRY_IO;
292 } else if (sense == 0x052501) {
293 /* An array based copy is in progress. Do not
294 * fail the path, do not bypass to another PG,
295 * do not retry. Fail the IO immediately.
296 * (Actually this is the same conclusion as in
297 * the default handler, but lets make sure.) */
298 return 0;
299 } else if (sense == 0x062900) {
300 /* Unit Attention Code. This is the first IO
301 * to the new path, so just retry. */
302 return MP_RETRY_IO;
303 }
304 }
305#endif
306
307 /* Try default handler */
308 return dm_scsi_err_handler(hwh, bio);
309}
310
311static struct hw_handler_type emc_hwh = {
312 .name = "emc",
313 .module = THIS_MODULE,
314 .create = emc_create,
315 .destroy = emc_destroy,
316 .pg_init = emc_pg_init,
317 .error = emc_error,
318};
319
320static int __init dm_emc_init(void)
321{
322 int r = dm_register_hw_handler(&emc_hwh);
323
324 if (r < 0)
325 DMERR("register failed %d", r);
326
327 DMINFO("version 0.0.3 loaded");
328
329 return r;
330}
331
332static void __exit dm_emc_exit(void)
333{
334 int r = dm_unregister_hw_handler(&emc_hwh);
335
336 if (r < 0)
337 DMERR("unregister failed %d", r);
338}
339
340module_init(dm_emc_init);
341module_exit(dm_emc_exit);
342
343MODULE_DESCRIPTION(DM_NAME " EMC CX/AX/FC-family multipath");
344MODULE_AUTHOR("Lars Marowsky-Bree <lmb@suse.de>");
345MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-hw-handler.c b/drivers/md/dm-hw-handler.c
deleted file mode 100644
index 2ee84d8aa0bf..000000000000
--- a/drivers/md/dm-hw-handler.c
+++ /dev/null
@@ -1,213 +0,0 @@
1/*
2 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
3 *
4 * This file is released under the GPL.
5 *
6 * Multipath hardware handler registration.
7 */
8
9#include "dm.h"
10#include "dm-hw-handler.h"
11
12#include <linux/slab.h>
13
14struct hwh_internal {
15 struct hw_handler_type hwht;
16
17 struct list_head list;
18 long use;
19};
20
21#define hwht_to_hwhi(__hwht) container_of((__hwht), struct hwh_internal, hwht)
22
23static LIST_HEAD(_hw_handlers);
24static DECLARE_RWSEM(_hwh_lock);
25
26static struct hwh_internal *__find_hw_handler_type(const char *name)
27{
28 struct hwh_internal *hwhi;
29
30 list_for_each_entry(hwhi, &_hw_handlers, list) {
31 if (!strcmp(name, hwhi->hwht.name))
32 return hwhi;
33 }
34
35 return NULL;
36}
37
38static struct hwh_internal *get_hw_handler(const char *name)
39{
40 struct hwh_internal *hwhi;
41
42 down_read(&_hwh_lock);
43 hwhi = __find_hw_handler_type(name);
44 if (hwhi) {
45 if ((hwhi->use == 0) && !try_module_get(hwhi->hwht.module))
46 hwhi = NULL;
47 else
48 hwhi->use++;
49 }
50 up_read(&_hwh_lock);
51
52 return hwhi;
53}
54
55struct hw_handler_type *dm_get_hw_handler(const char *name)
56{
57 struct hwh_internal *hwhi;
58
59 if (!name)
60 return NULL;
61
62 hwhi = get_hw_handler(name);
63 if (!hwhi) {
64 request_module("dm-%s", name);
65 hwhi = get_hw_handler(name);
66 }
67
68 return hwhi ? &hwhi->hwht : NULL;
69}
70
71void dm_put_hw_handler(struct hw_handler_type *hwht)
72{
73 struct hwh_internal *hwhi;
74
75 if (!hwht)
76 return;
77
78 down_read(&_hwh_lock);
79 hwhi = __find_hw_handler_type(hwht->name);
80 if (!hwhi)
81 goto out;
82
83 if (--hwhi->use == 0)
84 module_put(hwhi->hwht.module);
85
86 BUG_ON(hwhi->use < 0);
87
88 out:
89 up_read(&_hwh_lock);
90}
91
92static struct hwh_internal *_alloc_hw_handler(struct hw_handler_type *hwht)
93{
94 struct hwh_internal *hwhi = kzalloc(sizeof(*hwhi), GFP_KERNEL);
95
96 if (hwhi)
97 hwhi->hwht = *hwht;
98
99 return hwhi;
100}
101
102int dm_register_hw_handler(struct hw_handler_type *hwht)
103{
104 int r = 0;
105 struct hwh_internal *hwhi = _alloc_hw_handler(hwht);
106
107 if (!hwhi)
108 return -ENOMEM;
109
110 down_write(&_hwh_lock);
111
112 if (__find_hw_handler_type(hwht->name)) {
113 kfree(hwhi);
114 r = -EEXIST;
115 } else
116 list_add(&hwhi->list, &_hw_handlers);
117
118 up_write(&_hwh_lock);
119
120 return r;
121}
122
123int dm_unregister_hw_handler(struct hw_handler_type *hwht)
124{
125 struct hwh_internal *hwhi;
126
127 down_write(&_hwh_lock);
128
129 hwhi = __find_hw_handler_type(hwht->name);
130 if (!hwhi) {
131 up_write(&_hwh_lock);
132 return -EINVAL;
133 }
134
135 if (hwhi->use) {
136 up_write(&_hwh_lock);
137 return -ETXTBSY;
138 }
139
140 list_del(&hwhi->list);
141
142 up_write(&_hwh_lock);
143
144 kfree(hwhi);
145
146 return 0;
147}
148
149unsigned dm_scsi_err_handler(struct hw_handler *hwh, struct bio *bio)
150{
151#if 0
152 int sense_key, asc, ascq;
153
154 if (bio->bi_error & BIO_SENSE) {
155 /* FIXME: This is just an initial guess. */
156 /* key / asc / ascq */
157 sense_key = (bio->bi_error >> 16) & 0xff;
158 asc = (bio->bi_error >> 8) & 0xff;
159 ascq = bio->bi_error & 0xff;
160
161 switch (sense_key) {
162 /* This block as a whole comes from the device.
163 * So no point retrying on another path. */
164 case 0x03: /* Medium error */
165 case 0x05: /* Illegal request */
166 case 0x07: /* Data protect */
167 case 0x08: /* Blank check */
168 case 0x0a: /* copy aborted */
169 case 0x0c: /* obsolete - no clue ;-) */
170 case 0x0d: /* volume overflow */
171 case 0x0e: /* data miscompare */
172 case 0x0f: /* reserved - no idea either. */
173 return MP_ERROR_IO;
174
175 /* For these errors it's unclear whether they
176 * come from the device or the controller.
177 * So just lets try a different path, and if
178 * it eventually succeeds, user-space will clear
179 * the paths again... */
180 case 0x02: /* Not ready */
181 case 0x04: /* Hardware error */
182 case 0x09: /* vendor specific */
183 case 0x0b: /* Aborted command */
184 return MP_FAIL_PATH;
185
186 case 0x06: /* Unit attention - might want to decode */
187 if (asc == 0x04 && ascq == 0x01)
188 /* "Unit in the process of
189 * becoming ready" */
190 return 0;
191 return MP_FAIL_PATH;
192
193 /* FIXME: For Unit Not Ready we may want
194 * to have a generic pg activation
195 * feature (START_UNIT). */
196
197 /* Should these two ever end up in the
198 * error path? I don't think so. */
199 case 0x00: /* No sense */
200 case 0x01: /* Recovered error */
201 return 0;
202 }
203 }
204#endif
205
206 /* We got no idea how to decode the other kinds of errors ->
207 * assume generic error condition. */
208 return MP_FAIL_PATH;
209}
210
211EXPORT_SYMBOL_GPL(dm_register_hw_handler);
212EXPORT_SYMBOL_GPL(dm_unregister_hw_handler);
213EXPORT_SYMBOL_GPL(dm_scsi_err_handler);
diff --git a/drivers/md/dm-hw-handler.h b/drivers/md/dm-hw-handler.h
deleted file mode 100644
index 46809dcb121a..000000000000
--- a/drivers/md/dm-hw-handler.h
+++ /dev/null
@@ -1,63 +0,0 @@
/*
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 *
 * Multipath hardware handler registration.
 */

#ifndef DM_HW_HANDLER_H
#define DM_HW_HANDLER_H

#include <linux/device-mapper.h>

#include "dm-mpath.h"

struct hw_handler_type;

/* One hardware-handler instance, embedded in a multipath map. */
struct hw_handler {
	struct hw_handler_type *type;	/* ops table / identity (below) */
	struct mapped_device *md;	/* owning mapped device */
	void *context;			/* handler-private state */
};

/*
 * Constructs a hardware handler object, takes custom arguments
 */
/* Information about a hardware handler type */
struct hw_handler_type {
	char *name;
	struct module *module;

	/* Parse handler arguments and allocate private context. */
	int (*create) (struct hw_handler *handler, unsigned int argc,
		       char **argv);
	/* Release private context. */
	void (*destroy) (struct hw_handler *hwh);

	/* Activate the priority group via 'path'; implementations in this
	 * tree complete asynchronously through dm_pg_init_complete(). */
	void (*pg_init) (struct hw_handler *hwh, unsigned bypassed,
			 struct dm_path *path);
	/* Decode a failed bio into MP_* flags below; optional. */
	unsigned (*error) (struct hw_handler *hwh, struct bio *bio);
	int (*status) (struct hw_handler *hwh, status_type_t type,
		       char *result, unsigned int maxlen);
};

/* Register a hardware handler */
int dm_register_hw_handler(struct hw_handler_type *type);

/* Unregister a hardware handler */
int dm_unregister_hw_handler(struct hw_handler_type *type);

/* Returns a registered hardware handler type */
struct hw_handler_type *dm_get_hw_handler(const char *name);

/* Releases a hardware handler */
void dm_put_hw_handler(struct hw_handler_type *hwht);

/* Default err function */
unsigned dm_scsi_err_handler(struct hw_handler *hwh, struct bio *bio);

/* Error flags for err and dm_pg_init_complete */
#define MP_FAIL_PATH 1
#define MP_BYPASS_PG 2
#define MP_ERROR_IO 4	/* Don't retry this I/O */
#define MP_RETRY 8

#endif
diff --git a/drivers/md/dm-mpath-hp-sw.c b/drivers/md/dm-mpath-hp-sw.c
deleted file mode 100644
index b63a0ab37c53..000000000000
--- a/drivers/md/dm-mpath-hp-sw.c
+++ /dev/null
@@ -1,247 +0,0 @@
1/*
2 * Copyright (C) 2005 Mike Christie, All rights reserved.
3 * Copyright (C) 2007 Red Hat, Inc. All rights reserved.
4 * Authors: Mike Christie
5 * Dave Wysochanski
6 *
7 * This file is released under the GPL.
8 *
9 * This module implements the specific path activation code for
10 * HP StorageWorks and FSC FibreCat Asymmetric (Active/Passive)
11 * storage arrays.
12 * These storage arrays have controller-based failover, not
13 * LUN-based failover. However, LUN-based failover is the design
14 * of dm-multipath. Thus, this module is written for LUN-based failover.
15 */
16#include <linux/blkdev.h>
17#include <linux/list.h>
18#include <linux/types.h>
19#include <scsi/scsi.h>
20#include <scsi/scsi_cmnd.h>
21#include <scsi/scsi_dbg.h>
22
23#include "dm.h"
24#include "dm-hw-handler.h"
25
26#define DM_MSG_PREFIX "multipath hp-sw"
27#define DM_HP_HWH_NAME "hp-sw"
28#define DM_HP_HWH_VER "1.0.0"
29
/* Per-path state: sense buffer for the path activation request. */
struct hp_sw_context {
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
};
33
34/*
35 * hp_sw_error_is_retryable - Is an HP-specific check condition retryable?
36 * @req: path activation request
37 *
38 * Examine error codes of request and determine whether the error is retryable.
39 * Some error codes are already retried by scsi-ml (see
40 * scsi_decide_disposition), but some HP specific codes are not.
41 * The intent of this routine is to supply the logic for the HP specific
42 * check conditions.
43 *
44 * Returns:
45 * 1 - command completed with retryable error
46 * 0 - command completed with non-retryable error
47 *
48 * Possible optimizations
49 * 1. More hardware-specific error codes
50 */
51static int hp_sw_error_is_retryable(struct request *req)
52{
53 /*
54 * NOT_READY is known to be retryable
55 * For now we just dump out the sense data and call it retryable
56 */
57 if (status_byte(req->errors) == CHECK_CONDITION)
58 __scsi_print_sense(DM_HP_HWH_NAME, req->sense, req->sense_len);
59
60 /*
61 * At this point we don't have complete information about all the error
62 * codes from this hardware, so we are just conservative and retry
63 * when in doubt.
64 */
65 return 1;
66}
67
68/*
69 * hp_sw_end_io - Completion handler for HP path activation.
70 * @req: path activation request
71 * @error: scsi-ml error
72 *
73 * Check sense data, free request structure, and notify dm that
74 * pg initialization has completed.
75 *
76 * Context: scsi-ml softirq
77 *
78 */
79static void hp_sw_end_io(struct request *req, int error)
80{
81 struct dm_path *path = req->end_io_data;
82 unsigned err_flags = 0;
83
84 if (!error) {
85 DMDEBUG("%s path activation command - success",
86 path->dev->name);
87 goto out;
88 }
89
90 if (hp_sw_error_is_retryable(req)) {
91 DMDEBUG("%s path activation command - retry",
92 path->dev->name);
93 err_flags = MP_RETRY;
94 goto out;
95 }
96
97 DMWARN("%s path activation fail - error=0x%x",
98 path->dev->name, error);
99 err_flags = MP_FAIL_PATH;
100
101out:
102 req->end_io_data = NULL;
103 __blk_put_request(req->q, req);
104 dm_pg_init_complete(path, err_flags);
105}
106
107/*
108 * hp_sw_get_request - Allocate an HP specific path activation request
109 * @path: path on which request will be sent (needed for request queue)
110 *
111 * The START command is used for path activation request.
112 * These arrays are controller-based failover, not LUN based.
113 * One START command issued to a single path will fail over all
114 * LUNs for the same controller.
115 *
116 * Possible optimizations
117 * 1. Make timeout configurable
118 * 2. Preallocate request
119 */
120static struct request *hp_sw_get_request(struct dm_path *path)
121{
122 struct request *req;
123 struct block_device *bdev = path->dev->bdev;
124 struct request_queue *q = bdev_get_queue(bdev);
125 struct hp_sw_context *h = path->hwhcontext;
126
127 req = blk_get_request(q, WRITE, GFP_NOIO);
128 if (!req)
129 goto out;
130
131 req->timeout = 60 * HZ;
132
133 req->errors = 0;
134 req->cmd_type = REQ_TYPE_BLOCK_PC;
135 req->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
136 req->end_io_data = path;
137 req->sense = h->sense;
138 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
139
140 req->cmd[0] = START_STOP;
141 req->cmd[4] = 1;
142 req->cmd_len = COMMAND_SIZE(req->cmd[0]);
143
144out:
145 return req;
146}
147
148/*
149 * hp_sw_pg_init - HP path activation implementation.
150 * @hwh: hardware handler specific data
151 * @bypassed: unused; is the path group bypassed? (see dm-mpath.c)
152 * @path: path to send initialization command
153 *
154 * Send an HP-specific path activation command on 'path'.
155 * Do not try to optimize in any way, just send the activation command.
156 * More than one path activation command may be sent to the same controller.
157 * This seems to work fine for basic failover support.
158 *
159 * Possible optimizations
160 * 1. Detect an in-progress activation request and avoid submitting another one
161 * 2. Model the controller and only send a single activation request at a time
162 * 3. Determine the state of a path before sending an activation request
163 *
164 * Context: kmpathd (see process_queued_ios() in dm-mpath.c)
165 */
166static void hp_sw_pg_init(struct hw_handler *hwh, unsigned bypassed,
167 struct dm_path *path)
168{
169 struct request *req;
170 struct hp_sw_context *h;
171
172 path->hwhcontext = hwh->context;
173 h = hwh->context;
174
175 req = hp_sw_get_request(path);
176 if (!req) {
177 DMERR("%s path activation command - allocation fail",
178 path->dev->name);
179 goto retry;
180 }
181
182 DMDEBUG("%s path activation command - sent", path->dev->name);
183
184 blk_execute_rq_nowait(req->q, NULL, req, 1, hp_sw_end_io);
185 return;
186
187retry:
188 dm_pg_init_complete(path, MP_RETRY);
189}
190
191static int hp_sw_create(struct hw_handler *hwh, unsigned argc, char **argv)
192{
193 struct hp_sw_context *h;
194
195 h = kmalloc(sizeof(*h), GFP_KERNEL);
196 if (!h)
197 return -ENOMEM;
198
199 hwh->context = h;
200
201 return 0;
202}
203
204static void hp_sw_destroy(struct hw_handler *hwh)
205{
206 struct hp_sw_context *h = hwh->context;
207
208 kfree(h);
209}
210
/* Ops table registered with dm-mpath; no .error hook, so the default
 * dm_scsi_err_handler() applies. */
static struct hw_handler_type hp_sw_hwh = {
	.name = DM_HP_HWH_NAME,
	.module = THIS_MODULE,
	.create = hp_sw_create,
	.destroy = hp_sw_destroy,
	.pg_init = hp_sw_pg_init,
};
218
219static int __init hp_sw_init(void)
220{
221 int r;
222
223 r = dm_register_hw_handler(&hp_sw_hwh);
224 if (r < 0)
225 DMERR("register failed %d", r);
226 else
227 DMINFO("version " DM_HP_HWH_VER " loaded");
228
229 return r;
230}
231
232static void __exit hp_sw_exit(void)
233{
234 int r;
235
236 r = dm_unregister_hw_handler(&hp_sw_hwh);
237 if (r < 0)
238 DMERR("unregister failed %d", r);
239}
240
/* Standard module entry points and metadata. */
module_init(hp_sw_init);
module_exit(hp_sw_exit);

MODULE_DESCRIPTION("DM Multipath HP StorageWorks / FSC FibreCat (A/P) support");
MODULE_AUTHOR("Mike Christie, Dave Wysochanski <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DM_HP_HWH_VER);
diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c
deleted file mode 100644
index 95e77734880a..000000000000
--- a/drivers/md/dm-mpath-rdac.c
+++ /dev/null
@@ -1,700 +0,0 @@
1/*
2 * Engenio/LSI RDAC DM HW handler
3 *
4 * Copyright (C) 2005 Mike Christie. All rights reserved.
5 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 */
22#include <scsi/scsi.h>
23#include <scsi/scsi_cmnd.h>
24#include <scsi/scsi_eh.h>
25
26#define DM_MSG_PREFIX "multipath rdac"
27
28#include "dm.h"
29#include "dm-hw-handler.h"
30
31#define RDAC_DM_HWH_NAME "rdac"
32#define RDAC_DM_HWH_VER "0.4"
33
/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */

/* Quiescence timeout written into the mode page.  Fix vs. original:
 * the stray trailing ';' is removed -- it made the macro unusable in
 * any expression context (it only worked in "x = MACRO;" form). */
#define RDAC_QUIESCENCE_TIME 20

/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller modes definitions
 */
#define RDAC_MODE_TRANSFER_ALL_LUNS 0x01
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02

/*
 * RDAC Options field
 * (historical identifier spelling "QUIESENCE" kept; it is referenced below)
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_FAILOVER_TIMEOUT (60 * HZ)
59
/* MODE SENSE/SELECT(6) parameter header. */
struct rdac_mode_6_hdr {
	u8 data_len;
	u8 medium_type;
	u8 device_params;
	u8 block_desc_len;
};

/* MODE SENSE/SELECT(10) parameter header. */
struct rdac_mode_10_hdr {
	u16 data_len;
	u8 medium_type;
	u8 device_params;
	u16 reserved;
	u16 block_desc_len;
};

/* Fields shared by the legacy and expanded redundant-controller pages. */
struct rdac_mode_common {
	u8 controller_serial[16];
	u8 alt_controller_serial[16];
	u8 rdac_mode[2];
	u8 alt_rdac_mode[2];
	u8 quiescence_timeout;
	u8 rdac_options;
};

/* Redundant-controller page, MODE SELECT(6) layout (up to 32 LUNs). */
struct rdac_pg_legacy {
	struct rdac_mode_6_hdr hdr;
	u8 page_code;
	u8 page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN 32
	u8 lun_table[MODE6_MAX_LUN];
	u8 reserved2[32];
	u8 reserved3;
	u8 reserved4;
};

/* Redundant-controller page, MODE SELECT(10) layout (256 LUNs). */
struct rdac_pg_expanded {
	struct rdac_mode_10_hdr hdr;
	u8 page_code;
	u8 subpage_code;
	u8 page_len[2];
	struct rdac_mode_common common;
	u8 lun_table[256];
	u8 reserved3;
	u8 reserved4;
};

/* Vendor INQUIRY page 0xC9 ("vace"): volume access state.
 * avte_cvp bit 7 = AVT mode, bit 0 = this controller owns the LUN
 * (see c9_endio()). */
struct c9_inquiry {
	u8 peripheral_info;
	u8 page_code;	/* 0xC9 */
	u8 reserved1;
	u8 page_len;
	u8 page_id[4];	/* "vace" */
	u8 avte_cvp;
	u8 path_prio;
	u8 reserved2[38];
};

#define SUBSYS_ID_LEN 16
#define SLOT_ID_LEN 2

/* Vendor INQUIRY page 0xC4 ("subs"): subsystem/slot identity used to
 * match paths to a shared rdac_controller. */
struct c4_inquiry {
	u8 peripheral_info;
	u8 page_code;	/* 0xC4 */
	u8 reserved1;
	u8 page_len;
	u8 page_id[4];	/* "subs" */
	u8 subsys_id[SUBSYS_ID_LEN];
	u8 revision[4];
	u8 slot_id[SLOT_ID_LEN];
	u8 reserved[2];
};

/* One physical controller (keyed by subsys_id + slot_id), shared by
 * every handler whose path runs through it.  'lock' protects
 * 'submitted' and 'cmd_list'; at most one MODE SELECT is in flight. */
struct rdac_controller {
	u8 subsys_id[SUBSYS_ID_LEN];
	u8 slot_id[SLOT_ID_LEN];
	int use_10_ms;	/* 1/0 once known; -1 until the C2 inquiry answers */
	struct kref kref;
	struct list_head node; /* list of all controllers */
	spinlock_t lock;
	int submitted;
	struct list_head cmd_list; /* list of commands to be submitted */
	union {
		struct rdac_pg_legacy legacy;
		struct rdac_pg_expanded expanded;
	} mode_select;
};
/* Vendor INQUIRY page 0xC8 ("edid"): extended device id; supplies the
 * LUN number (see c8_endio()). */
struct c8_inquiry {
	u8 peripheral_info;
	u8 page_code;	/* 0xC8 */
	u8 reserved1;
	u8 page_len;
	u8 page_id[4];	/* "edid" */
	u8 reserved2[3];
	u8 vol_uniq_id_len;
	u8 vol_uniq_id[16];
	u8 vol_user_label_len;
	u8 vol_user_label[60];
	u8 array_uniq_id_len;
	u8 array_unique_id[16];
	u8 array_user_label_len;
	u8 array_user_label[60];
	u8 lun[8];
};

/* Vendor INQUIRY page 0xC2 ("swr4"): firmware info; max_lun_supported
 * selects MODE SELECT(6) vs (10) (see c2_endio()). */
struct c2_inquiry {
	u8 peripheral_info;
	u8 page_code;	/* 0xC2 */
	u8 reserved1;
	u8 page_len;
	u8 page_id[4];	/* "swr4" */
	u8 sw_version[3];
	u8 sw_date[3];
	u8 features_enabled;
	u8 max_lun_supported;
	u8 partitions[239]; /* Total allocation length should be 0xFF */
};

/* Per-path handler state; 'work' runs service_wkq() to issue whichever
 * command 'cmd_to_send' names. */
struct rdac_handler {
	struct list_head entry; /* list waiting to submit MODE SELECT */
	unsigned timeout;
	struct rdac_controller *ctlr;
#define UNINITIALIZED_LUN (1 << 8)
	unsigned lun;
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
	struct dm_path *path;
	struct work_struct work;
#define SEND_C2_INQUIRY 1
#define SEND_C4_INQUIRY 2
#define SEND_C8_INQUIRY 3
#define SEND_C9_INQUIRY 4
#define SEND_MODE_SELECT 5
	int cmd_to_send;
	union {
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};
200
/* All known controllers; protected by list_lock. */
static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
/* Single-threaded workqueue that issues the handler commands. */
static struct workqueue_struct *rdac_wkqd;
204
205static inline int had_failures(struct request *req, int error)
206{
207 return (error || host_byte(req->errors) != DID_OK ||
208 msg_byte(req->errors) != COMMAND_COMPLETE);
209}
210
211static void rdac_resubmit_all(struct rdac_handler *h)
212{
213 struct rdac_controller *ctlr = h->ctlr;
214 struct rdac_handler *tmp, *h1;
215
216 spin_lock(&ctlr->lock);
217 list_for_each_entry_safe(h1, tmp, &ctlr->cmd_list, entry) {
218 h1->cmd_to_send = SEND_C9_INQUIRY;
219 queue_work(rdac_wkqd, &h1->work);
220 list_del(&h1->entry);
221 }
222 ctlr->submitted = 0;
223 spin_unlock(&ctlr->lock);
224}
225
/*
 * Completion handler for the failover MODE SELECT.
 *
 * Hard failures fail the path; a small set of known-retryable sense
 * codes resubmits the C9 inquiry instead of completing pg_init.  In
 * every case the waiters queued behind this controller are resubmitted
 * and the request is released.
 */
static void mode_select_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct scsi_sense_hdr sense_hdr;
	int sense = 0, fail = 0;

	/* Transport/host level failure: fail the path outright. */
	if (had_failures(req, error)) {
		fail = 1;
		goto failed;
	}

	if (status_byte(req->errors) == CHECK_CONDITION) {
		scsi_normalize_sense(req->sense, SCSI_SENSE_BUFFERSIZE,
				&sense_hdr);
		/* Pack key/asc/ascq into a single comparable value. */
		sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
				sense_hdr.ascq;
		/* If it is retryable failure, submit the c9 inquiry again */
		if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
		    sense == 0x62900) {
			/* 0x59136 - Command lock contention
			 * 0x[6b]8b02 - Quiescence in progress or achieved
			 * 0x62900 - Power On, Reset, or Bus Device Reset
			 */
			h->cmd_to_send = SEND_C9_INQUIRY;
			queue_work(rdac_wkqd, &h->work);
			/* pg_init completes after the retry, not here */
			goto done;
		}
		if (sense)
			DMINFO("MODE_SELECT failed on %s with sense 0x%x",
				h->path->dev->name, sense);
	}
failed:
	if (fail || sense)
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
	else
		dm_pg_init_complete(h->path, 0);

done:
	rdac_resubmit_all(h);
	__blk_put_request(req->q, req);
}
267
268static struct request *get_rdac_req(struct rdac_handler *h,
269 void *buffer, unsigned buflen, int rw)
270{
271 struct request *rq;
272 struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
273
274 rq = blk_get_request(q, rw, GFP_KERNEL);
275
276 if (!rq) {
277 DMINFO("get_rdac_req: blk_get_request failed");
278 return NULL;
279 }
280
281 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
282 blk_put_request(rq);
283 DMINFO("get_rdac_req: blk_rq_map_kern failed");
284 return NULL;
285 }
286
287 rq->sense = h->sense;
288 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
289 rq->sense_len = 0;
290
291 rq->end_io_data = h;
292 rq->timeout = h->timeout;
293 rq->cmd_type = REQ_TYPE_BLOCK_PC;
294 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
295 return rq;
296}
297
298static struct request *rdac_failover_get(struct rdac_handler *h)
299{
300 struct request *rq;
301 struct rdac_mode_common *common;
302 unsigned data_size;
303
304 if (h->ctlr->use_10_ms) {
305 struct rdac_pg_expanded *rdac_pg;
306
307 data_size = sizeof(struct rdac_pg_expanded);
308 rdac_pg = &h->ctlr->mode_select.expanded;
309 memset(rdac_pg, 0, data_size);
310 common = &rdac_pg->common;
311 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
312 rdac_pg->subpage_code = 0x1;
313 rdac_pg->page_len[0] = 0x01;
314 rdac_pg->page_len[1] = 0x28;
315 rdac_pg->lun_table[h->lun] = 0x81;
316 } else {
317 struct rdac_pg_legacy *rdac_pg;
318
319 data_size = sizeof(struct rdac_pg_legacy);
320 rdac_pg = &h->ctlr->mode_select.legacy;
321 memset(rdac_pg, 0, data_size);
322 common = &rdac_pg->common;
323 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
324 rdac_pg->page_len = 0x68;
325 rdac_pg->lun_table[h->lun] = 0x81;
326 }
327 common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
328 common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
329 common->rdac_options = RDAC_FORCED_QUIESENCE;
330
331 /* get request for block layer packet command */
332 rq = get_rdac_req(h, &h->ctlr->mode_select, data_size, WRITE);
333 if (!rq) {
334 DMERR("rdac_failover_get: no rq");
335 return NULL;
336 }
337
338 /* Prepare the command. */
339 if (h->ctlr->use_10_ms) {
340 rq->cmd[0] = MODE_SELECT_10;
341 rq->cmd[7] = data_size >> 8;
342 rq->cmd[8] = data_size & 0xff;
343 } else {
344 rq->cmd[0] = MODE_SELECT;
345 rq->cmd[4] = data_size;
346 }
347 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
348
349 return rq;
350}
351
352/* Acquires h->ctlr->lock */
353static void submit_mode_select(struct rdac_handler *h)
354{
355 struct request *rq;
356 struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
357
358 spin_lock(&h->ctlr->lock);
359 if (h->ctlr->submitted) {
360 list_add(&h->entry, &h->ctlr->cmd_list);
361 goto drop_lock;
362 }
363
364 if (!q) {
365 DMINFO("submit_mode_select: no queue");
366 goto fail_path;
367 }
368
369 rq = rdac_failover_get(h);
370 if (!rq) {
371 DMERR("submit_mode_select: no rq");
372 goto fail_path;
373 }
374
375 DMINFO("queueing MODE_SELECT command on %s", h->path->dev->name);
376
377 blk_execute_rq_nowait(q, NULL, rq, 1, mode_select_endio);
378 h->ctlr->submitted = 1;
379 goto drop_lock;
380fail_path:
381 dm_pg_init_complete(h->path, MP_FAIL_PATH);
382drop_lock:
383 spin_unlock(&h->ctlr->lock);
384}
385
386static void release_ctlr(struct kref *kref)
387{
388 struct rdac_controller *ctlr;
389 ctlr = container_of(kref, struct rdac_controller, kref);
390
391 spin_lock(&list_lock);
392 list_del(&ctlr->node);
393 spin_unlock(&list_lock);
394 kfree(ctlr);
395}
396
397static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
398{
399 struct rdac_controller *ctlr, *tmp;
400
401 spin_lock(&list_lock);
402
403 list_for_each_entry(tmp, &ctlr_list, node) {
404 if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
405 (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
406 kref_get(&tmp->kref);
407 spin_unlock(&list_lock);
408 return tmp;
409 }
410 }
411 ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
412 if (!ctlr)
413 goto done;
414
415 /* initialize fields of controller */
416 memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
417 memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
418 kref_init(&ctlr->kref);
419 spin_lock_init(&ctlr->lock);
420 ctlr->submitted = 0;
421 ctlr->use_10_ms = -1;
422 INIT_LIST_HEAD(&ctlr->cmd_list);
423 list_add(&ctlr->node, &ctlr_list);
424done:
425 spin_unlock(&list_lock);
426 return ctlr;
427}
428
429static void c4_endio(struct request *req, int error)
430{
431 struct rdac_handler *h = req->end_io_data;
432 struct c4_inquiry *sp;
433
434 if (had_failures(req, error)) {
435 dm_pg_init_complete(h->path, MP_FAIL_PATH);
436 goto done;
437 }
438
439 sp = &h->inq.c4;
440
441 h->ctlr = get_controller(sp->subsys_id, sp->slot_id);
442
443 if (h->ctlr) {
444 h->cmd_to_send = SEND_C9_INQUIRY;
445 queue_work(rdac_wkqd, &h->work);
446 } else
447 dm_pg_init_complete(h->path, MP_FAIL_PATH);
448done:
449 __blk_put_request(req->q, req);
450}
451
452static void c2_endio(struct request *req, int error)
453{
454 struct rdac_handler *h = req->end_io_data;
455 struct c2_inquiry *sp;
456
457 if (had_failures(req, error)) {
458 dm_pg_init_complete(h->path, MP_FAIL_PATH);
459 goto done;
460 }
461
462 sp = &h->inq.c2;
463
464 /* If more than MODE6_MAX_LUN luns are supported, use mode select 10 */
465 if (sp->max_lun_supported >= MODE6_MAX_LUN)
466 h->ctlr->use_10_ms = 1;
467 else
468 h->ctlr->use_10_ms = 0;
469
470 h->cmd_to_send = SEND_MODE_SELECT;
471 queue_work(rdac_wkqd, &h->work);
472done:
473 __blk_put_request(req->q, req);
474}
475
476static void c9_endio(struct request *req, int error)
477{
478 struct rdac_handler *h = req->end_io_data;
479 struct c9_inquiry *sp;
480
481 if (had_failures(req, error)) {
482 dm_pg_init_complete(h->path, MP_FAIL_PATH);
483 goto done;
484 }
485
486 /* We need to look at the sense keys here to take clear action.
487 * For now simple logic: If the host is in AVT mode or if controller
488 * owns the lun, return dm_pg_init_complete(), otherwise submit
489 * MODE SELECT.
490 */
491 sp = &h->inq.c9;
492
493 /* If in AVT mode, return success */
494 if ((sp->avte_cvp >> 7) == 0x1) {
495 dm_pg_init_complete(h->path, 0);
496 goto done;
497 }
498
499 /* If the controller on this path owns the LUN, return success */
500 if (sp->avte_cvp & 0x1) {
501 dm_pg_init_complete(h->path, 0);
502 goto done;
503 }
504
505 if (h->ctlr) {
506 if (h->ctlr->use_10_ms == -1)
507 h->cmd_to_send = SEND_C2_INQUIRY;
508 else
509 h->cmd_to_send = SEND_MODE_SELECT;
510 } else
511 h->cmd_to_send = SEND_C4_INQUIRY;
512 queue_work(rdac_wkqd, &h->work);
513done:
514 __blk_put_request(req->q, req);
515}
516
517static void c8_endio(struct request *req, int error)
518{
519 struct rdac_handler *h = req->end_io_data;
520 struct c8_inquiry *sp;
521
522 if (had_failures(req, error)) {
523 dm_pg_init_complete(h->path, MP_FAIL_PATH);
524 goto done;
525 }
526
527 /* We need to look at the sense keys here to take clear action.
528 * For now simple logic: Get the lun from the inquiry page.
529 */
530 sp = &h->inq.c8;
531 h->lun = sp->lun[7]; /* currently it uses only one byte */
532 h->cmd_to_send = SEND_C9_INQUIRY;
533 queue_work(rdac_wkqd, &h->work);
534done:
535 __blk_put_request(req->q, req);
536}
537
538static void submit_inquiry(struct rdac_handler *h, int page_code,
539 unsigned int len, rq_end_io_fn endio)
540{
541 struct request *rq;
542 struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
543
544 if (!q)
545 goto fail_path;
546
547 rq = get_rdac_req(h, &h->inq, len, READ);
548 if (!rq)
549 goto fail_path;
550
551 /* Prepare the command. */
552 rq->cmd[0] = INQUIRY;
553 rq->cmd[1] = 1;
554 rq->cmd[2] = page_code;
555 rq->cmd[4] = len;
556 rq->cmd_len = COMMAND_SIZE(INQUIRY);
557 blk_execute_rq_nowait(q, NULL, rq, 1, endio);
558 return;
559
560fail_path:
561 dm_pg_init_complete(h->path, MP_FAIL_PATH);
562}
563
564static void service_wkq(struct work_struct *work)
565{
566 struct rdac_handler *h = container_of(work, struct rdac_handler, work);
567
568 switch (h->cmd_to_send) {
569 case SEND_C2_INQUIRY:
570 submit_inquiry(h, 0xC2, sizeof(struct c2_inquiry), c2_endio);
571 break;
572 case SEND_C4_INQUIRY:
573 submit_inquiry(h, 0xC4, sizeof(struct c4_inquiry), c4_endio);
574 break;
575 case SEND_C8_INQUIRY:
576 submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
577 break;
578 case SEND_C9_INQUIRY:
579 submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
580 break;
581 case SEND_MODE_SELECT:
582 submit_mode_select(h);
583 break;
584 default:
585 BUG();
586 }
587}
/*
 * Only support subpage 2c until we confirm that this is just a matter
 * of updating firmware or not, and RDAC (basic AVT works already) for
 * now; we can add these in when we get time and testers.
 */
593static int rdac_create(struct hw_handler *hwh, unsigned argc, char **argv)
594{
595 struct rdac_handler *h;
596 unsigned timeout;
597
598 if (argc == 0) {
599 /* No arguments: use defaults */
600 timeout = RDAC_FAILOVER_TIMEOUT;
601 } else if (argc != 1) {
602 DMWARN("incorrect number of arguments");
603 return -EINVAL;
604 } else {
605 if (sscanf(argv[1], "%u", &timeout) != 1) {
606 DMWARN("invalid timeout value");
607 return -EINVAL;
608 }
609 }
610
611 h = kzalloc(sizeof(*h), GFP_KERNEL);
612 if (!h)
613 return -ENOMEM;
614
615 hwh->context = h;
616 h->timeout = timeout;
617 h->lun = UNINITIALIZED_LUN;
618 INIT_WORK(&h->work, service_wkq);
619 DMWARN("using RDAC command with timeout %u", h->timeout);
620
621 return 0;
622}
623
624static void rdac_destroy(struct hw_handler *hwh)
625{
626 struct rdac_handler *h = hwh->context;
627
628 if (h->ctlr)
629 kref_put(&h->ctlr->kref, release_ctlr);
630 kfree(h);
631 hwh->context = NULL;
632}
633
/* No RDAC-specific bio error decoding yet: defer to the generic
 * SCSI sense handler. */
static unsigned rdac_error(struct hw_handler *hwh, struct bio *bio)
{
	unsigned flags = dm_scsi_err_handler(hwh, bio);

	return flags;
}
639
640static void rdac_pg_init(struct hw_handler *hwh, unsigned bypassed,
641 struct dm_path *path)
642{
643 struct rdac_handler *h = hwh->context;
644
645 h->path = path;
646 switch (h->lun) {
647 case UNINITIALIZED_LUN:
648 submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
649 break;
650 default:
651 submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
652 }
653}
654
/* Ops table registered with dm-mpath; unlike hp-sw this handler also
 * supplies an .error hook. */
static struct hw_handler_type rdac_handler = {
	.name = RDAC_DM_HWH_NAME,
	.module = THIS_MODULE,
	.create = rdac_create,
	.destroy = rdac_destroy,
	.pg_init = rdac_pg_init,
	.error = rdac_error,
};
663
664static int __init rdac_init(void)
665{
666 int r;
667
668 rdac_wkqd = create_singlethread_workqueue("rdac_wkqd");
669 if (!rdac_wkqd) {
670 DMERR("Failed to create workqueue rdac_wkqd.");
671 return -ENOMEM;
672 }
673
674 r = dm_register_hw_handler(&rdac_handler);
675 if (r < 0) {
676 DMERR("%s: register failed %d", RDAC_DM_HWH_NAME, r);
677 destroy_workqueue(rdac_wkqd);
678 return r;
679 }
680
681 DMINFO("%s: version %s loaded", RDAC_DM_HWH_NAME, RDAC_DM_HWH_VER);
682 return 0;
683}
684
685static void __exit rdac_exit(void)
686{
687 int r = dm_unregister_hw_handler(&rdac_handler);
688
689 destroy_workqueue(rdac_wkqd);
690 if (r < 0)
691 DMERR("%s: unregister failed %d", RDAC_DM_HWH_NAME, r);
692}
693
/* Standard module entry points and metadata. */
module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("DM Multipath LSI/Engenio RDAC support");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_LICENSE("GPL");
MODULE_VERSION(RDAC_DM_HWH_VER);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e7ee59e655d5..9f7302d4878d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -7,7 +7,6 @@
7 7
8#include "dm.h" 8#include "dm.h"
9#include "dm-path-selector.h" 9#include "dm-path-selector.h"
10#include "dm-hw-handler.h"
11#include "dm-bio-list.h" 10#include "dm-bio-list.h"
12#include "dm-bio-record.h" 11#include "dm-bio-record.h"
13#include "dm-uevent.h" 12#include "dm-uevent.h"
@@ -20,6 +19,7 @@
20#include <linux/slab.h> 19#include <linux/slab.h>
21#include <linux/time.h> 20#include <linux/time.h>
22#include <linux/workqueue.h> 21#include <linux/workqueue.h>
22#include <scsi/scsi_dh.h>
23#include <asm/atomic.h> 23#include <asm/atomic.h>
24 24
25#define DM_MSG_PREFIX "multipath" 25#define DM_MSG_PREFIX "multipath"
@@ -61,7 +61,8 @@ struct multipath {
61 61
62 spinlock_t lock; 62 spinlock_t lock;
63 63
64 struct hw_handler hw_handler; 64 const char *hw_handler_name;
65 struct work_struct activate_path;
65 unsigned nr_priority_groups; 66 unsigned nr_priority_groups;
66 struct list_head priority_groups; 67 struct list_head priority_groups;
67 unsigned pg_init_required; /* pg_init needs calling? */ 68 unsigned pg_init_required; /* pg_init needs calling? */
@@ -106,9 +107,10 @@ typedef int (*action_fn) (struct pgpath *pgpath);
106 107
107static struct kmem_cache *_mpio_cache; 108static struct kmem_cache *_mpio_cache;
108 109
109static struct workqueue_struct *kmultipathd; 110static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
110static void process_queued_ios(struct work_struct *work); 111static void process_queued_ios(struct work_struct *work);
111static void trigger_event(struct work_struct *work); 112static void trigger_event(struct work_struct *work);
113static void activate_path(struct work_struct *work);
112 114
113 115
114/*----------------------------------------------- 116/*-----------------------------------------------
@@ -178,6 +180,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
178 m->queue_io = 1; 180 m->queue_io = 1;
179 INIT_WORK(&m->process_queued_ios, process_queued_ios); 181 INIT_WORK(&m->process_queued_ios, process_queued_ios);
180 INIT_WORK(&m->trigger_event, trigger_event); 182 INIT_WORK(&m->trigger_event, trigger_event);
183 INIT_WORK(&m->activate_path, activate_path);
181 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); 184 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
182 if (!m->mpio_pool) { 185 if (!m->mpio_pool) {
183 kfree(m); 186 kfree(m);
@@ -193,18 +196,13 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
193static void free_multipath(struct multipath *m) 196static void free_multipath(struct multipath *m)
194{ 197{
195 struct priority_group *pg, *tmp; 198 struct priority_group *pg, *tmp;
196 struct hw_handler *hwh = &m->hw_handler;
197 199
198 list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) { 200 list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
199 list_del(&pg->list); 201 list_del(&pg->list);
200 free_priority_group(pg, m->ti); 202 free_priority_group(pg, m->ti);
201 } 203 }
202 204
203 if (hwh->type) { 205 kfree(m->hw_handler_name);
204 hwh->type->destroy(hwh);
205 dm_put_hw_handler(hwh->type);
206 }
207
208 mempool_destroy(m->mpio_pool); 206 mempool_destroy(m->mpio_pool);
209 kfree(m); 207 kfree(m);
210} 208}
@@ -216,12 +214,10 @@ static void free_multipath(struct multipath *m)
216 214
217static void __switch_pg(struct multipath *m, struct pgpath *pgpath) 215static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
218{ 216{
219 struct hw_handler *hwh = &m->hw_handler;
220
221 m->current_pg = pgpath->pg; 217 m->current_pg = pgpath->pg;
222 218
223 /* Must we initialise the PG first, and queue I/O till it's ready? */ 219 /* Must we initialise the PG first, and queue I/O till it's ready? */
224 if (hwh->type && hwh->type->pg_init) { 220 if (m->hw_handler_name) {
225 m->pg_init_required = 1; 221 m->pg_init_required = 1;
226 m->queue_io = 1; 222 m->queue_io = 1;
227 } else { 223 } else {
@@ -409,7 +405,6 @@ static void process_queued_ios(struct work_struct *work)
409{ 405{
410 struct multipath *m = 406 struct multipath *m =
411 container_of(work, struct multipath, process_queued_ios); 407 container_of(work, struct multipath, process_queued_ios);
412 struct hw_handler *hwh = &m->hw_handler;
413 struct pgpath *pgpath = NULL; 408 struct pgpath *pgpath = NULL;
414 unsigned init_required = 0, must_queue = 1; 409 unsigned init_required = 0, must_queue = 1;
415 unsigned long flags; 410 unsigned long flags;
@@ -439,7 +434,7 @@ out:
439 spin_unlock_irqrestore(&m->lock, flags); 434 spin_unlock_irqrestore(&m->lock, flags);
440 435
441 if (init_required) 436 if (init_required)
442 hwh->type->pg_init(hwh, pgpath->pg->bypassed, &pgpath->path); 437 queue_work(kmpath_handlerd, &m->activate_path);
443 438
444 if (!must_queue) 439 if (!must_queue)
445 dispatch_queued_ios(m); 440 dispatch_queued_ios(m);
@@ -652,8 +647,6 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
652 647
653static int parse_hw_handler(struct arg_set *as, struct multipath *m) 648static int parse_hw_handler(struct arg_set *as, struct multipath *m)
654{ 649{
655 int r;
656 struct hw_handler_type *hwht;
657 unsigned hw_argc; 650 unsigned hw_argc;
658 struct dm_target *ti = m->ti; 651 struct dm_target *ti = m->ti;
659 652
@@ -661,30 +654,20 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
661 {0, 1024, "invalid number of hardware handler args"}, 654 {0, 1024, "invalid number of hardware handler args"},
662 }; 655 };
663 656
664 r = read_param(_params, shift(as), &hw_argc, &ti->error); 657 if (read_param(_params, shift(as), &hw_argc, &ti->error))
665 if (r)
666 return -EINVAL; 658 return -EINVAL;
667 659
668 if (!hw_argc) 660 if (!hw_argc)
669 return 0; 661 return 0;
670 662
671 hwht = dm_get_hw_handler(shift(as)); 663 m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
672 if (!hwht) { 664 request_module("scsi_dh_%s", m->hw_handler_name);
665 if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
673 ti->error = "unknown hardware handler type"; 666 ti->error = "unknown hardware handler type";
667 kfree(m->hw_handler_name);
668 m->hw_handler_name = NULL;
674 return -EINVAL; 669 return -EINVAL;
675 } 670 }
676
677 m->hw_handler.md = dm_table_get_md(ti->table);
678 dm_put(m->hw_handler.md);
679
680 r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
681 if (r) {
682 dm_put_hw_handler(hwht);
683 ti->error = "hardware handler constructor failed";
684 return r;
685 }
686
687 m->hw_handler.type = hwht;
688 consume(as, hw_argc - 1); 671 consume(as, hw_argc - 1);
689 672
690 return 0; 673 return 0;
@@ -808,6 +791,7 @@ static void multipath_dtr(struct dm_target *ti)
808{ 791{
809 struct multipath *m = (struct multipath *) ti->private; 792 struct multipath *m = (struct multipath *) ti->private;
810 793
794 flush_workqueue(kmpath_handlerd);
811 flush_workqueue(kmultipathd); 795 flush_workqueue(kmultipathd);
812 free_multipath(m); 796 free_multipath(m);
813} 797}
@@ -1025,52 +1009,85 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1025 return limit_reached; 1009 return limit_reached;
1026} 1010}
1027 1011
1028/* 1012static void pg_init_done(struct dm_path *path, int errors)
1029 * pg_init must call this when it has completed its initialisation
1030 */
1031void dm_pg_init_complete(struct dm_path *path, unsigned err_flags)
1032{ 1013{
1033 struct pgpath *pgpath = path_to_pgpath(path); 1014 struct pgpath *pgpath = path_to_pgpath(path);
1034 struct priority_group *pg = pgpath->pg; 1015 struct priority_group *pg = pgpath->pg;
1035 struct multipath *m = pg->m; 1016 struct multipath *m = pg->m;
1036 unsigned long flags; 1017 unsigned long flags;
1037 1018
1038 /* 1019 /* device or driver problems */
1039 * If requested, retry pg_init until maximum number of retries exceeded. 1020 switch (errors) {
1040 * If retry not requested and PG already bypassed, always fail the path. 1021 case SCSI_DH_OK:
1041 */ 1022 break;
1042 if (err_flags & MP_RETRY) { 1023 case SCSI_DH_NOSYS:
1043 if (pg_init_limit_reached(m, pgpath)) 1024 if (!m->hw_handler_name) {
1044 err_flags |= MP_FAIL_PATH; 1025 errors = 0;
1045 } else if (err_flags && pg->bypassed) 1026 break;
1046 err_flags |= MP_FAIL_PATH; 1027 }
1047 1028 DMERR("Cannot failover device because scsi_dh_%s was not "
1048 if (err_flags & MP_FAIL_PATH) 1029 "loaded.", m->hw_handler_name);
1030 /*
1031 * Fail path for now, so we do not ping pong
1032 */
1049 fail_path(pgpath); 1033 fail_path(pgpath);
1050 1034 break;
1051 if (err_flags & MP_BYPASS_PG) 1035 case SCSI_DH_DEV_TEMP_BUSY:
1036 /*
1037 * Probably doing something like FW upgrade on the
1038 * controller so try the other pg.
1039 */
1052 bypass_pg(m, pg, 1); 1040 bypass_pg(m, pg, 1);
1041 break;
1042 /* TODO: For SCSI_DH_RETRY we should wait a couple seconds */
1043 case SCSI_DH_RETRY:
1044 case SCSI_DH_IMM_RETRY:
1045 case SCSI_DH_RES_TEMP_UNAVAIL:
1046 if (pg_init_limit_reached(m, pgpath))
1047 fail_path(pgpath);
1048 errors = 0;
1049 break;
1050 default:
1051 /*
1052 * We probably do not want to fail the path for a device
1053 * error, but this is what the old dm did. In future
1054 * patches we can do more advanced handling.
1055 */
1056 fail_path(pgpath);
1057 }
1053 1058
1054 spin_lock_irqsave(&m->lock, flags); 1059 spin_lock_irqsave(&m->lock, flags);
1055 if (err_flags & ~MP_RETRY) { 1060 if (errors) {
1061 DMERR("Could not failover device. Error %d.", errors);
1056 m->current_pgpath = NULL; 1062 m->current_pgpath = NULL;
1057 m->current_pg = NULL; 1063 m->current_pg = NULL;
1058 } else if (!m->pg_init_required) 1064 } else if (!m->pg_init_required) {
1059 m->queue_io = 0; 1065 m->queue_io = 0;
1066 pg->bypassed = 0;
1067 }
1060 1068
1061 m->pg_init_in_progress = 0; 1069 m->pg_init_in_progress = 0;
1062 queue_work(kmultipathd, &m->process_queued_ios); 1070 queue_work(kmultipathd, &m->process_queued_ios);
1063 spin_unlock_irqrestore(&m->lock, flags); 1071 spin_unlock_irqrestore(&m->lock, flags);
1064} 1072}
1065 1073
1074static void activate_path(struct work_struct *work)
1075{
1076 int ret;
1077 struct multipath *m =
1078 container_of(work, struct multipath, activate_path);
1079 struct dm_path *path = &m->current_pgpath->path;
1080
1081 ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
1082 pg_init_done(path, ret);
1083}
1084
1066/* 1085/*
1067 * end_io handling 1086 * end_io handling
1068 */ 1087 */
1069static int do_end_io(struct multipath *m, struct bio *bio, 1088static int do_end_io(struct multipath *m, struct bio *bio,
1070 int error, struct dm_mpath_io *mpio) 1089 int error, struct dm_mpath_io *mpio)
1071{ 1090{
1072 struct hw_handler *hwh = &m->hw_handler;
1073 unsigned err_flags = MP_FAIL_PATH; /* Default behavior */
1074 unsigned long flags; 1091 unsigned long flags;
1075 1092
1076 if (!error) 1093 if (!error)
@@ -1097,19 +1114,8 @@ static int do_end_io(struct multipath *m, struct bio *bio,
1097 } 1114 }
1098 spin_unlock_irqrestore(&m->lock, flags); 1115 spin_unlock_irqrestore(&m->lock, flags);
1099 1116
1100 if (hwh->type && hwh->type->error) 1117 if (mpio->pgpath)
1101 err_flags = hwh->type->error(hwh, bio); 1118 fail_path(mpio->pgpath);
1102
1103 if (mpio->pgpath) {
1104 if (err_flags & MP_FAIL_PATH)
1105 fail_path(mpio->pgpath);
1106
1107 if (err_flags & MP_BYPASS_PG)
1108 bypass_pg(m, mpio->pgpath->pg, 1);
1109 }
1110
1111 if (err_flags & MP_ERROR_IO)
1112 return -EIO;
1113 1119
1114 requeue: 1120 requeue:
1115 dm_bio_restore(&mpio->details, bio); 1121 dm_bio_restore(&mpio->details, bio);
@@ -1194,7 +1200,6 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
1194 int sz = 0; 1200 int sz = 0;
1195 unsigned long flags; 1201 unsigned long flags;
1196 struct multipath *m = (struct multipath *) ti->private; 1202 struct multipath *m = (struct multipath *) ti->private;
1197 struct hw_handler *hwh = &m->hw_handler;
1198 struct priority_group *pg; 1203 struct priority_group *pg;
1199 struct pgpath *p; 1204 struct pgpath *p;
1200 unsigned pg_num; 1205 unsigned pg_num;
@@ -1214,12 +1219,10 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
1214 DMEMIT("pg_init_retries %u ", m->pg_init_retries); 1219 DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1215 } 1220 }
1216 1221
1217 if (hwh->type && hwh->type->status) 1222 if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1218 sz += hwh->type->status(hwh, type, result + sz, maxlen - sz);
1219 else if (!hwh->type || type == STATUSTYPE_INFO)
1220 DMEMIT("0 "); 1223 DMEMIT("0 ");
1221 else 1224 else
1222 DMEMIT("1 %s ", hwh->type->name); 1225 DMEMIT("1 %s ", m->hw_handler_name);
1223 1226
1224 DMEMIT("%u ", m->nr_priority_groups); 1227 DMEMIT("%u ", m->nr_priority_groups);
1225 1228
@@ -1422,6 +1425,21 @@ static int __init dm_multipath_init(void)
1422 return -ENOMEM; 1425 return -ENOMEM;
1423 } 1426 }
1424 1427
1428 /*
1429 * A separate workqueue is used to handle the device handlers
1430 * to avoid overloading existing workqueue. Overloading the
1431 * old workqueue would also create a bottleneck in the
1432 * path of the storage hardware device activation.
1433 */
1434 kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
1435 if (!kmpath_handlerd) {
1436 DMERR("failed to create workqueue kmpath_handlerd");
1437 destroy_workqueue(kmultipathd);
1438 dm_unregister_target(&multipath_target);
1439 kmem_cache_destroy(_mpio_cache);
1440 return -ENOMEM;
1441 }
1442
1425 DMINFO("version %u.%u.%u loaded", 1443 DMINFO("version %u.%u.%u loaded",
1426 multipath_target.version[0], multipath_target.version[1], 1444 multipath_target.version[0], multipath_target.version[1],
1427 multipath_target.version[2]); 1445 multipath_target.version[2]);
@@ -1433,6 +1451,7 @@ static void __exit dm_multipath_exit(void)
1433{ 1451{
1434 int r; 1452 int r;
1435 1453
1454 destroy_workqueue(kmpath_handlerd);
1436 destroy_workqueue(kmultipathd); 1455 destroy_workqueue(kmultipathd);
1437 1456
1438 r = dm_unregister_target(&multipath_target); 1457 r = dm_unregister_target(&multipath_target);
@@ -1441,8 +1460,6 @@ static void __exit dm_multipath_exit(void)
1441 kmem_cache_destroy(_mpio_cache); 1460 kmem_cache_destroy(_mpio_cache);
1442} 1461}
1443 1462
1444EXPORT_SYMBOL_GPL(dm_pg_init_complete);
1445
1446module_init(dm_multipath_init); 1463module_init(dm_multipath_init);
1447module_exit(dm_multipath_exit); 1464module_exit(dm_multipath_exit);
1448 1465
diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h
index b9cdcbb3ed59..c198b856a452 100644
--- a/drivers/md/dm-mpath.h
+++ b/drivers/md/dm-mpath.h
@@ -16,7 +16,6 @@ struct dm_path {
16 unsigned is_active; /* Read-only */ 16 unsigned is_active; /* Read-only */
17 17
18 void *pscontext; /* For path-selector use */ 18 void *pscontext; /* For path-selector use */
19 void *hwhcontext; /* For hw-handler use */
20}; 19};
21 20
22/* Callback for hwh_pg_init_fn to use when complete */ 21/* Callback for hwh_pg_init_fn to use when complete */
diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h
index 1acbdd61b670..10b6ef758725 100644
--- a/drivers/message/fusion/lsi/mpi.h
+++ b/drivers/message/fusion/lsi/mpi.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2007 LSI Corporation. 2 * Copyright (c) 2000-2008 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi.h 5 * Name: mpi.h
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 2bd8adae0f00..b2db3330c591 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2007 LSI Corporation. 2 * Copyright (c) 2000-2008 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi_cnfg.h 5 * Name: mpi_cnfg.h
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index d40d6d15ae20..75e599b85b64 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -5,7 +5,7 @@
5 * For use with LSI PCI chip/adapter(s) 5 * For use with LSI PCI chip/adapter(s)
6 * running LSI Fusion MPT (Message Passing Technology) firmware. 6 * running LSI Fusion MPT (Message Passing Technology) firmware.
7 * 7 *
8 * Copyright (c) 1999-2007 LSI Corporation 8 * Copyright (c) 1999-2008 LSI Corporation
9 * (mailto:DL-MPTFusionLinux@lsi.com) 9 * (mailto:DL-MPTFusionLinux@lsi.com)
10 * 10 *
11 */ 11 */
@@ -103,7 +103,7 @@ static int mfcounter = 0;
103 * Public data... 103 * Public data...
104 */ 104 */
105 105
106struct proc_dir_entry *mpt_proc_root_dir; 106static struct proc_dir_entry *mpt_proc_root_dir;
107 107
108#define WHOINIT_UNKNOWN 0xAA 108#define WHOINIT_UNKNOWN 0xAA
109 109
@@ -253,6 +253,55 @@ mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
253 return 0; 253 return 0;
254} 254}
255 255
256/**
257 * mpt_fault_reset_work - work performed on workq after ioc fault
258 * @work: input argument, used to derive ioc
259 *
260**/
261static void
262mpt_fault_reset_work(struct work_struct *work)
263{
264 MPT_ADAPTER *ioc =
265 container_of(work, MPT_ADAPTER, fault_reset_work.work);
266 u32 ioc_raw_state;
267 int rc;
268 unsigned long flags;
269
270 if (ioc->diagPending || !ioc->active)
271 goto out;
272
273 ioc_raw_state = mpt_GetIocState(ioc, 0);
274 if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
275 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
276 ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
277 printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
278 ioc->name, __FUNCTION__);
279 rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
280 printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
281 __FUNCTION__, (rc == 0) ? "success" : "failed");
282 ioc_raw_state = mpt_GetIocState(ioc, 0);
283 if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
284 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
285 "reset (%04xh)\n", ioc->name, ioc_raw_state &
286 MPI_DOORBELL_DATA_MASK);
287 }
288
289 out:
290 /*
291 * Take turns polling alternate controller
292 */
293 if (ioc->alt_ioc)
294 ioc = ioc->alt_ioc;
295
296 /* rearm the timer */
297 spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
298 if (ioc->reset_work_q)
299 queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
300 msecs_to_jiffies(MPT_POLLING_INTERVAL));
301 spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
302}
303
304
256/* 305/*
257 * Process turbo (context) reply... 306 * Process turbo (context) reply...
258 */ 307 */
@@ -1616,6 +1665,22 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1616 /* Find lookup slot. */ 1665 /* Find lookup slot. */
1617 INIT_LIST_HEAD(&ioc->list); 1666 INIT_LIST_HEAD(&ioc->list);
1618 1667
1668
1669 /* Initialize workqueue */
1670 INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
1671 spin_lock_init(&ioc->fault_reset_work_lock);
1672
1673 snprintf(ioc->reset_work_q_name, KOBJ_NAME_LEN, "mpt_poll_%d", ioc->id);
1674 ioc->reset_work_q =
1675 create_singlethread_workqueue(ioc->reset_work_q_name);
1676 if (!ioc->reset_work_q) {
1677 printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
1678 ioc->name);
1679 pci_release_selected_regions(pdev, ioc->bars);
1680 kfree(ioc);
1681 return -ENOMEM;
1682 }
1683
1619 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n", 1684 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
1620 ioc->name, &ioc->facts, &ioc->pfacts[0])); 1685 ioc->name, &ioc->facts, &ioc->pfacts[0]));
1621 1686
@@ -1727,6 +1792,10 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1727 iounmap(ioc->memmap); 1792 iounmap(ioc->memmap);
1728 if (r != -5) 1793 if (r != -5)
1729 pci_release_selected_regions(pdev, ioc->bars); 1794 pci_release_selected_regions(pdev, ioc->bars);
1795
1796 destroy_workqueue(ioc->reset_work_q);
1797 ioc->reset_work_q = NULL;
1798
1730 kfree(ioc); 1799 kfree(ioc);
1731 pci_set_drvdata(pdev, NULL); 1800 pci_set_drvdata(pdev, NULL);
1732 return r; 1801 return r;
@@ -1759,6 +1828,10 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1759 } 1828 }
1760#endif 1829#endif
1761 1830
1831 if (!ioc->alt_ioc)
1832 queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
1833 msecs_to_jiffies(MPT_POLLING_INTERVAL));
1834
1762 return 0; 1835 return 0;
1763} 1836}
1764 1837
@@ -1774,6 +1847,19 @@ mpt_detach(struct pci_dev *pdev)
1774 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 1847 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1775 char pname[32]; 1848 char pname[32];
1776 u8 cb_idx; 1849 u8 cb_idx;
1850 unsigned long flags;
1851 struct workqueue_struct *wq;
1852
1853 /*
1854 * Stop polling ioc for fault condition
1855 */
1856 spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
1857 wq = ioc->reset_work_q;
1858 ioc->reset_work_q = NULL;
1859 spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
1860 cancel_delayed_work(&ioc->fault_reset_work);
1861 destroy_workqueue(wq);
1862
1777 1863
1778 sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name); 1864 sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
1779 remove_proc_entry(pname, NULL); 1865 remove_proc_entry(pname, NULL);
@@ -7456,7 +7542,6 @@ EXPORT_SYMBOL(mpt_resume);
7456EXPORT_SYMBOL(mpt_suspend); 7542EXPORT_SYMBOL(mpt_suspend);
7457#endif 7543#endif
7458EXPORT_SYMBOL(ioc_list); 7544EXPORT_SYMBOL(ioc_list);
7459EXPORT_SYMBOL(mpt_proc_root_dir);
7460EXPORT_SYMBOL(mpt_register); 7545EXPORT_SYMBOL(mpt_register);
7461EXPORT_SYMBOL(mpt_deregister); 7546EXPORT_SYMBOL(mpt_deregister);
7462EXPORT_SYMBOL(mpt_event_register); 7547EXPORT_SYMBOL(mpt_event_register);
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index a8f617447d22..6adab648dbb9 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -5,7 +5,7 @@
5 * LSIFC9xx/LSI409xx Fibre Channel 5 * LSIFC9xx/LSI409xx Fibre Channel
6 * running LSI Fusion MPT (Message Passing Technology) firmware. 6 * running LSI Fusion MPT (Message Passing Technology) firmware.
7 * 7 *
8 * Copyright (c) 1999-2007 LSI Corporation 8 * Copyright (c) 1999-2008 LSI Corporation
9 * (mailto:DL-MPTFusionLinux@lsi.com) 9 * (mailto:DL-MPTFusionLinux@lsi.com)
10 * 10 *
11 */ 11 */
@@ -73,11 +73,11 @@
73#endif 73#endif
74 74
75#ifndef COPYRIGHT 75#ifndef COPYRIGHT
76#define COPYRIGHT "Copyright (c) 1999-2007 " MODULEAUTHOR 76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
77#endif 77#endif
78 78
79#define MPT_LINUX_VERSION_COMMON "3.04.06" 79#define MPT_LINUX_VERSION_COMMON "3.04.07"
80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.06" 80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.07"
81#define WHAT_MAGIC_STRING "@" "(" "#" ")" 81#define WHAT_MAGIC_STRING "@" "(" "#" ")"
82 82
83#define show_mptmod_ver(s,ver) \ 83#define show_mptmod_ver(s,ver) \
@@ -176,6 +176,8 @@
176/* debug print string length used for events and iocstatus */ 176/* debug print string length used for events and iocstatus */
177# define EVENT_DESCR_STR_SZ 100 177# define EVENT_DESCR_STR_SZ 100
178 178
179#define MPT_POLLING_INTERVAL 1000 /* in milliseconds */
180
179#ifdef __KERNEL__ /* { */ 181#ifdef __KERNEL__ /* { */
180/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 182/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
181 183
@@ -709,6 +711,12 @@ typedef struct _MPT_ADAPTER
709 struct workqueue_struct *fc_rescan_work_q; 711 struct workqueue_struct *fc_rescan_work_q;
710 struct scsi_cmnd **ScsiLookup; 712 struct scsi_cmnd **ScsiLookup;
711 spinlock_t scsi_lookup_lock; 713 spinlock_t scsi_lookup_lock;
714
715 char reset_work_q_name[KOBJ_NAME_LEN];
716 struct workqueue_struct *reset_work_q;
717 struct delayed_work fault_reset_work;
718 spinlock_t fault_reset_work_lock;
719
712} MPT_ADAPTER; 720} MPT_ADAPTER;
713 721
714/* 722/*
@@ -919,7 +927,6 @@ extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhys
919 * Public data decl's... 927 * Public data decl's...
920 */ 928 */
921extern struct list_head ioc_list; 929extern struct list_head ioc_list;
922extern struct proc_dir_entry *mpt_proc_root_dir;
923 930
924/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 931/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
925#endif /* } __KERNEL__ */ 932#endif /* } __KERNEL__ */
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index c5946560c4e2..a5920423e2b2 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -4,7 +4,7 @@
4 * For use with LSI PCI chip/adapters 4 * For use with LSI PCI chip/adapters
5 * running LSI Fusion MPT (Message Passing Technology) firmware. 5 * running LSI Fusion MPT (Message Passing Technology) firmware.
6 * 6 *
7 * Copyright (c) 1999-2007 LSI Corporation 7 * Copyright (c) 1999-2008 LSI Corporation
8 * (mailto:DL-MPTFusionLinux@lsi.com) 8 * (mailto:DL-MPTFusionLinux@lsi.com)
9 * 9 *
10 */ 10 */
@@ -66,7 +66,7 @@
66#include <scsi/scsi_host.h> 66#include <scsi/scsi_host.h>
67#include <scsi/scsi_tcq.h> 67#include <scsi/scsi_tcq.h>
68 68
69#define COPYRIGHT "Copyright (c) 1999-2007 LSI Corporation" 69#define COPYRIGHT "Copyright (c) 1999-2008 LSI Corporation"
70#define MODULEAUTHOR "LSI Corporation" 70#define MODULEAUTHOR "LSI Corporation"
71#include "mptbase.h" 71#include "mptbase.h"
72#include "mptctl.h" 72#include "mptctl.h"
diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h
index 2c1890127e15..d564cc9ada6a 100644
--- a/drivers/message/fusion/mptctl.h
+++ b/drivers/message/fusion/mptctl.h
@@ -5,7 +5,7 @@
5 * LSIFC9xx/LSI409xx Fibre Channel 5 * LSIFC9xx/LSI409xx Fibre Channel
6 * running LSI Fusion MPT (Message Passing Technology) firmware. 6 * running LSI Fusion MPT (Message Passing Technology) firmware.
7 * 7 *
8 * Copyright (c) 1999-2007 LSI Corporation 8 * Copyright (c) 1999-2008 LSI Corporation
9 * (mailto:DL-MPTFusionLinux@lsi.com) 9 * (mailto:DL-MPTFusionLinux@lsi.com)
10 * 10 *
11 */ 11 */
diff --git a/drivers/message/fusion/mptdebug.h b/drivers/message/fusion/mptdebug.h
index ffdb0a6191b4..510b9f492093 100644
--- a/drivers/message/fusion/mptdebug.h
+++ b/drivers/message/fusion/mptdebug.h
@@ -3,7 +3,7 @@
3 * For use with LSI PCI chip/adapter(s) 3 * For use with LSI PCI chip/adapter(s)
4 * running LSI Fusion MPT (Message Passing Technology) firmware. 4 * running LSI Fusion MPT (Message Passing Technology) firmware.
5 * 5 *
6 * Copyright (c) 1999-2007 LSI Corporation 6 * Copyright (c) 1999-2008 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 */ 9 */
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 1e24ab4ac38c..fc31ca6829d8 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -3,7 +3,7 @@
3 * For use with LSI PCI chip/adapter(s) 3 * For use with LSI PCI chip/adapter(s)
4 * running LSI Fusion MPT (Message Passing Technology) firmware. 4 * running LSI Fusion MPT (Message Passing Technology) firmware.
5 * 5 *
6 * Copyright (c) 1999-2007 LSI Corporation 6 * Copyright (c) 1999-2008 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 */ 9 */
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 7950fc678ed1..d709d92b7b30 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -4,7 +4,7 @@
4 * For use with LSI Fibre Channel PCI chip/adapters 4 * For use with LSI Fibre Channel PCI chip/adapters
5 * running LSI Fusion MPT (Message Passing Technology) firmware. 5 * running LSI Fusion MPT (Message Passing Technology) firmware.
6 * 6 *
7 * Copyright (c) 2000-2007 LSI Corporation 7 * Copyright (c) 2000-2008 LSI Corporation
8 * (mailto:DL-MPTFusionLinux@lsi.com) 8 * (mailto:DL-MPTFusionLinux@lsi.com)
9 * 9 *
10 */ 10 */
diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h
index bafb67fc8181..33927ee7dc3b 100644
--- a/drivers/message/fusion/mptlan.h
+++ b/drivers/message/fusion/mptlan.h
@@ -4,7 +4,7 @@
4 * For use with LSI Fibre Channel PCI chip/adapters 4 * For use with LSI Fibre Channel PCI chip/adapters
5 * running LSI Fusion MPT (Message Passing Technology) firmware. 5 * running LSI Fusion MPT (Message Passing Technology) firmware.
6 * 6 *
7 * Copyright (c) 2000-2007 LSI Corporation 7 * Copyright (c) 2000-2008 LSI Corporation
8 * (mailto:DL-MPTFusionLinux@lsi.com) 8 * (mailto:DL-MPTFusionLinux@lsi.com)
9 * 9 *
10 */ 10 */
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 4d492ba232b0..b1147aa7afde 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -3,7 +3,7 @@
3 * For use with LSI PCI chip/adapter(s) 3 * For use with LSI PCI chip/adapter(s)
4 * running LSI Fusion MPT (Message Passing Technology) firmware. 4 * running LSI Fusion MPT (Message Passing Technology) firmware.
5 * 5 *
6 * Copyright (c) 1999-2007 LSI Corporation 6 * Copyright (c) 1999-2008 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 */ 8 */
9/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 9/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index 7c150f50629a..2b544e0877e6 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -5,7 +5,7 @@
5 * LSIFC9xx/LSI409xx Fibre Channel 5 * LSIFC9xx/LSI409xx Fibre Channel
6 * running LSI MPT (Message Passing Technology) firmware. 6 * running LSI MPT (Message Passing Technology) firmware.
7 * 7 *
8 * Copyright (c) 1999-2007 LSI Corporation 8 * Copyright (c) 1999-2008 LSI Corporation
9 * (mailto:DL-MPTFusionLinux@lsi.com) 9 * (mailto:DL-MPTFusionLinux@lsi.com)
10 * 10 *
11 */ 11 */
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index c68ef00c2f92..d142b6b4b976 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -3,7 +3,7 @@
3 * For use with LSI PCI chip/adapter(s) 3 * For use with LSI PCI chip/adapter(s)
4 * running LSI Fusion MPT (Message Passing Technology) firmware. 4 * running LSI Fusion MPT (Message Passing Technology) firmware.
5 * 5 *
6 * Copyright (c) 1999-2007 LSI Corporation 6 * Copyright (c) 1999-2008 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 */ 9 */
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 7ea7da0e090c..319aa3033371 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -5,7 +5,7 @@
5 * LSIFC9xx/LSI409xx Fibre Channel 5 * LSIFC9xx/LSI409xx Fibre Channel
6 * running LSI Fusion MPT (Message Passing Technology) firmware. 6 * running LSI Fusion MPT (Message Passing Technology) firmware.
7 * 7 *
8 * Copyright (c) 1999-2007 LSI Corporation 8 * Copyright (c) 1999-2008 LSI Corporation
9 * (mailto:DL-MPTFusionLinux@lsi.com) 9 * (mailto:DL-MPTFusionLinux@lsi.com)
10 * 10 *
11 */ 11 */
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 1effca4e40e1..61620144e49c 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -3,7 +3,7 @@
3 * For use with LSI PCI chip/adapter(s) 3 * For use with LSI PCI chip/adapter(s)
4 * running LSI Fusion MPT (Message Passing Technology) firmware. 4 * running LSI Fusion MPT (Message Passing Technology) firmware.
5 * 5 *
6 * Copyright (c) 1999-2007 LSI Corporation 6 * Copyright (c) 1999-2008 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 */ 9 */
@@ -447,6 +447,7 @@ static int mptspi_target_alloc(struct scsi_target *starget)
447 spi_max_offset(starget) = ioc->spi_data.maxSyncOffset; 447 spi_max_offset(starget) = ioc->spi_data.maxSyncOffset;
448 448
449 spi_offset(starget) = 0; 449 spi_offset(starget) = 0;
450 spi_period(starget) = 0xFF;
450 mptspi_write_width(starget, 0); 451 mptspi_write_width(starget, 0);
451 452
452 return 0; 453 return 0;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 636af2862308..1921b8dbb242 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -179,17 +179,29 @@ config FUJITSU_LAPTOP
179 tristate "Fujitsu Laptop Extras" 179 tristate "Fujitsu Laptop Extras"
180 depends on X86 180 depends on X86
181 depends on ACPI 181 depends on ACPI
182 depends on INPUT
182 depends on BACKLIGHT_CLASS_DEVICE 183 depends on BACKLIGHT_CLASS_DEVICE
183 ---help--- 184 ---help---
184 This is a driver for laptops built by Fujitsu: 185 This is a driver for laptops built by Fujitsu:
185 186
186 * P2xxx/P5xxx/S6xxx/S7xxx series Lifebooks 187 * P2xxx/P5xxx/S6xxx/S7xxx series Lifebooks
187 * Possibly other Fujitsu laptop models 188 * Possibly other Fujitsu laptop models
189 * Tested with S6410 and S7020
188 190
189 It adds support for LCD brightness control. 191 It adds support for LCD brightness control and some hotkeys.
190 192
191 If you have a Fujitsu laptop, say Y or M here. 193 If you have a Fujitsu laptop, say Y or M here.
192 194
195config FUJITSU_LAPTOP_DEBUG
196 bool "Verbose debug mode for Fujitsu Laptop Extras"
197 depends on FUJITSU_LAPTOP
198 default n
199 ---help---
200 Enables extra debug output from the fujitsu extras driver, at the
201 expense of a slight increase in driver size.
202
203 If you are not sure, say N here.
204
193config TC1100_WMI 205config TC1100_WMI
194 tristate "HP Compaq TC1100 Tablet WMI Extras (EXPERIMENTAL)" 206 tristate "HP Compaq TC1100 Tablet WMI Extras (EXPERIMENTAL)"
195 depends on X86 && !X86_64 207 depends on X86 && !X86_64
@@ -219,6 +231,23 @@ config MSI_LAPTOP
219 231
220 If you have an MSI S270 laptop, say Y or M here. 232 If you have an MSI S270 laptop, say Y or M here.
221 233
234config COMPAL_LAPTOP
235 tristate "Compal Laptop Extras"
236 depends on X86
237 depends on ACPI_EC
238 depends on BACKLIGHT_CLASS_DEVICE
239 ---help---
240 This is a driver for laptops built by Compal:
241
242 Compal FL90/IFL90
243 Compal FL91/IFL91
244 Compal FL92/JFL92
245 Compal FT00/IFT00
246
247 It adds support for Bluetooth, WLAN and LCD brightness control.
248
249 If you have an Compal FL9x/IFL9x/FT00 laptop, say Y or M here.
250
222config SONY_LAPTOP 251config SONY_LAPTOP
223 tristate "Sony Laptop Extras" 252 tristate "Sony Laptop Extras"
224 depends on X86 && ACPI 253 depends on X86 && ACPI
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 1952875a272e..a6dac6a2e7e5 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -5,10 +5,11 @@ obj- := misc.o # Dummy rule to force built-in.o to be made
5 5
6obj-$(CONFIG_IBM_ASM) += ibmasm/ 6obj-$(CONFIG_IBM_ASM) += ibmasm/
7obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ 7obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
8obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
9obj-$(CONFIG_ACER_WMI) += acer-wmi.o
10obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o 8obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
11obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o 9obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
10obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
11obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
12obj-$(CONFIG_ACER_WMI) += acer-wmi.o
12obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o 13obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
13obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o 14obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
14obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o 15obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c
index dd13a3749927..e7a3fe508dff 100644
--- a/drivers/misc/acer-wmi.c
+++ b/drivers/misc/acer-wmi.c
@@ -22,18 +22,18 @@
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 23 */
24 24
25#define ACER_WMI_VERSION "0.1"
26
27#include <linux/kernel.h> 25#include <linux/kernel.h>
28#include <linux/module.h> 26#include <linux/module.h>
29#include <linux/init.h> 27#include <linux/init.h>
30#include <linux/types.h> 28#include <linux/types.h>
31#include <linux/dmi.h> 29#include <linux/dmi.h>
30#include <linux/fb.h>
32#include <linux/backlight.h> 31#include <linux/backlight.h>
33#include <linux/leds.h> 32#include <linux/leds.h>
34#include <linux/platform_device.h> 33#include <linux/platform_device.h>
35#include <linux/acpi.h> 34#include <linux/acpi.h>
36#include <linux/i8042.h> 35#include <linux/i8042.h>
36#include <linux/debugfs.h>
37 37
38#include <acpi/acpi_drivers.h> 38#include <acpi/acpi_drivers.h>
39 39
@@ -87,6 +87,7 @@ struct acer_quirks {
87 * Acer ACPI method GUIDs 87 * Acer ACPI method GUIDs
88 */ 88 */
89#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" 89#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
90#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C"
90#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3" 91#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"
91#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" 92#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A"
92 93
@@ -150,6 +151,12 @@ struct acer_data {
150 int brightness; 151 int brightness;
151}; 152};
152 153
154struct acer_debug {
155 struct dentry *root;
156 struct dentry *devices;
157 u32 wmid_devices;
158};
159
153/* Each low-level interface must define at least some of the following */ 160/* Each low-level interface must define at least some of the following */
154struct wmi_interface { 161struct wmi_interface {
155 /* The WMI device type */ 162 /* The WMI device type */
@@ -160,6 +167,9 @@ struct wmi_interface {
160 167
161 /* Private data for the current interface */ 168 /* Private data for the current interface */
162 struct acer_data data; 169 struct acer_data data;
170
171 /* debugfs entries associated with this interface */
172 struct acer_debug debug;
163}; 173};
164 174
165/* The static interface pointer, points to the currently detected interface */ 175/* The static interface pointer, points to the currently detected interface */
@@ -174,7 +184,7 @@ static struct wmi_interface *interface;
174struct quirk_entry { 184struct quirk_entry {
175 u8 wireless; 185 u8 wireless;
176 u8 mailled; 186 u8 mailled;
177 u8 brightness; 187 s8 brightness;
178 u8 bluetooth; 188 u8 bluetooth;
179}; 189};
180 190
@@ -198,6 +208,10 @@ static int dmi_matched(const struct dmi_system_id *dmi)
198static struct quirk_entry quirk_unknown = { 208static struct quirk_entry quirk_unknown = {
199}; 209};
200 210
211static struct quirk_entry quirk_acer_aspire_1520 = {
212 .brightness = -1,
213};
214
201static struct quirk_entry quirk_acer_travelmate_2490 = { 215static struct quirk_entry quirk_acer_travelmate_2490 = {
202 .mailled = 1, 216 .mailled = 1,
203}; 217};
@@ -207,9 +221,31 @@ static struct quirk_entry quirk_medion_md_98300 = {
207 .wireless = 1, 221 .wireless = 1,
208}; 222};
209 223
224static struct quirk_entry quirk_fujitsu_amilo_li_1718 = {
225 .wireless = 2,
226};
227
210static struct dmi_system_id acer_quirks[] = { 228static struct dmi_system_id acer_quirks[] = {
211 { 229 {
212 .callback = dmi_matched, 230 .callback = dmi_matched,
231 .ident = "Acer Aspire 1360",
232 .matches = {
233 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
234 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
235 },
236 .driver_data = &quirk_acer_aspire_1520,
237 },
238 {
239 .callback = dmi_matched,
240 .ident = "Acer Aspire 1520",
241 .matches = {
242 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
243 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1520"),
244 },
245 .driver_data = &quirk_acer_aspire_1520,
246 },
247 {
248 .callback = dmi_matched,
213 .ident = "Acer Aspire 3100", 249 .ident = "Acer Aspire 3100",
214 .matches = { 250 .matches = {
215 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 251 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -300,6 +336,15 @@ static struct dmi_system_id acer_quirks[] = {
300 }, 336 },
301 { 337 {
302 .callback = dmi_matched, 338 .callback = dmi_matched,
339 .ident = "Fujitsu Siemens Amilo Li 1718",
340 .matches = {
341 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
342 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Li 1718"),
343 },
344 .driver_data = &quirk_fujitsu_amilo_li_1718,
345 },
346 {
347 .callback = dmi_matched,
303 .ident = "Medion MD 98300", 348 .ident = "Medion MD 98300",
304 .matches = { 349 .matches = {
305 DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), 350 DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
@@ -393,6 +438,12 @@ struct wmi_interface *iface)
393 return AE_ERROR; 438 return AE_ERROR;
394 *value = result & 0x1; 439 *value = result & 0x1;
395 return AE_OK; 440 return AE_OK;
441 case 2:
442 err = ec_read(0x71, &result);
443 if (err)
444 return AE_ERROR;
445 *value = result & 0x1;
446 return AE_OK;
396 default: 447 default:
397 err = ec_read(0xA, &result); 448 err = ec_read(0xA, &result);
398 if (err) 449 if (err)
@@ -506,6 +557,15 @@ static acpi_status AMW0_set_capabilities(void)
506 struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; 557 struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
507 union acpi_object *obj; 558 union acpi_object *obj;
508 559
560 /*
561 * On laptops with this strange GUID (non Acer), normal probing doesn't
562 * work.
563 */
564 if (wmi_has_guid(AMW0_GUID2)) {
565 interface->capability |= ACER_CAP_WIRELESS;
566 return AE_OK;
567 }
568
509 args.eax = ACER_AMW0_WRITE; 569 args.eax = ACER_AMW0_WRITE;
510 args.ecx = args.edx = 0; 570 args.ecx = args.edx = 0;
511 571
@@ -552,7 +612,8 @@ static acpi_status AMW0_set_capabilities(void)
552 * appear to use the same EC register for brightness, even if they 612 * appear to use the same EC register for brightness, even if they
553 * differ for wireless, etc 613 * differ for wireless, etc
554 */ 614 */
555 interface->capability |= ACER_CAP_BRIGHTNESS; 615 if (quirks->brightness >= 0)
616 interface->capability |= ACER_CAP_BRIGHTNESS;
556 617
557 return AE_OK; 618 return AE_OK;
558} 619}
@@ -807,7 +868,15 @@ static int read_brightness(struct backlight_device *bd)
807 868
808static int update_bl_status(struct backlight_device *bd) 869static int update_bl_status(struct backlight_device *bd)
809{ 870{
810 set_u32(bd->props.brightness, ACER_CAP_BRIGHTNESS); 871 int intensity = bd->props.brightness;
872
873 if (bd->props.power != FB_BLANK_UNBLANK)
874 intensity = 0;
875 if (bd->props.fb_blank != FB_BLANK_UNBLANK)
876 intensity = 0;
877
878 set_u32(intensity, ACER_CAP_BRIGHTNESS);
879
811 return 0; 880 return 0;
812} 881}
813 882
@@ -829,8 +898,9 @@ static int __devinit acer_backlight_init(struct device *dev)
829 898
830 acer_backlight_device = bd; 899 acer_backlight_device = bd;
831 900
901 bd->props.power = FB_BLANK_UNBLANK;
902 bd->props.brightness = max_brightness;
832 bd->props.max_brightness = max_brightness; 903 bd->props.max_brightness = max_brightness;
833 bd->props.brightness = read_brightness(NULL);
834 backlight_update_status(bd); 904 backlight_update_status(bd);
835 return 0; 905 return 0;
836} 906}
@@ -894,6 +964,28 @@ static DEVICE_ATTR(interface, S_IWUGO | S_IRUGO | S_IWUSR,
894 show_interface, NULL); 964 show_interface, NULL);
895 965
896/* 966/*
967 * debugfs functions
968 */
969static u32 get_wmid_devices(void)
970{
971 struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
972 union acpi_object *obj;
973 acpi_status status;
974
975 status = wmi_query_block(WMID_GUID2, 1, &out);
976 if (ACPI_FAILURE(status))
977 return 0;
978
979 obj = (union acpi_object *) out.pointer;
980 if (obj && obj->type == ACPI_TYPE_BUFFER &&
981 obj->buffer.length == sizeof(u32)) {
982 return *((u32 *) obj->buffer.pointer);
983 } else {
984 return 0;
985 }
986}
987
988/*
897 * Platform device 989 * Platform device
898 */ 990 */
899static int __devinit acer_platform_probe(struct platform_device *device) 991static int __devinit acer_platform_probe(struct platform_device *device)
@@ -1052,12 +1144,40 @@ error_sysfs:
1052 return retval; 1144 return retval;
1053} 1145}
1054 1146
1147static void remove_debugfs(void)
1148{
1149 debugfs_remove(interface->debug.devices);
1150 debugfs_remove(interface->debug.root);
1151}
1152
1153static int create_debugfs(void)
1154{
1155 interface->debug.root = debugfs_create_dir("acer-wmi", NULL);
1156 if (!interface->debug.root) {
1157 printk(ACER_ERR "Failed to create debugfs directory");
1158 return -ENOMEM;
1159 }
1160
1161 interface->debug.devices = debugfs_create_u32("devices", S_IRUGO,
1162 interface->debug.root,
1163 &interface->debug.wmid_devices);
1164 if (!interface->debug.devices)
1165 goto error_debugfs;
1166
1167 return 0;
1168
1169error_debugfs:
1170 remove_debugfs();
1171 return -ENOMEM;
1172}
1173
1055static int __init acer_wmi_init(void) 1174static int __init acer_wmi_init(void)
1056{ 1175{
1057 int err; 1176 int err;
1058 1177
1059 printk(ACER_INFO "Acer Laptop ACPI-WMI Extras version %s\n", 1178 printk(ACER_INFO "Acer Laptop ACPI-WMI Extras\n");
1060 ACER_WMI_VERSION); 1179
1180 find_quirks();
1061 1181
1062 /* 1182 /*
1063 * Detect which ACPI-WMI interface we're using. 1183 * Detect which ACPI-WMI interface we're using.
@@ -1092,8 +1212,6 @@ static int __init acer_wmi_init(void)
1092 if (wmi_has_guid(AMW0_GUID1)) 1212 if (wmi_has_guid(AMW0_GUID1))
1093 AMW0_find_mailled(); 1213 AMW0_find_mailled();
1094 1214
1095 find_quirks();
1096
1097 if (!interface) { 1215 if (!interface) {
1098 printk(ACER_ERR "No or unsupported WMI interface, unable to " 1216 printk(ACER_ERR "No or unsupported WMI interface, unable to "
1099 "load\n"); 1217 "load\n");
@@ -1111,6 +1229,13 @@ static int __init acer_wmi_init(void)
1111 if (err) 1229 if (err)
1112 return err; 1230 return err;
1113 1231
1232 if (wmi_has_guid(WMID_GUID2)) {
1233 interface->debug.wmid_devices = get_wmid_devices();
1234 err = create_debugfs();
1235 if (err)
1236 return err;
1237 }
1238
1114 /* Override any initial settings with values from the commandline */ 1239 /* Override any initial settings with values from the commandline */
1115 acer_commandline_init(); 1240 acer_commandline_init();
1116 1241
diff --git a/drivers/misc/compal-laptop.c b/drivers/misc/compal-laptop.c
new file mode 100644
index 000000000000..344b790a6253
--- /dev/null
+++ b/drivers/misc/compal-laptop.c
@@ -0,0 +1,404 @@
1/*-*-linux-c-*-*/
2
3/*
4 Copyright (C) 2008 Cezary Jackiewicz <cezary.jackiewicz (at) gmail.com>
5
6 based on MSI driver
7
8 Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de>
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful, but
16 WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 02110-1301, USA.
24 */
25
26/*
27 * comapl-laptop.c - Compal laptop support.
28 *
29 * This driver exports a few files in /sys/devices/platform/compal-laptop/:
30 *
31 * wlan - wlan subsystem state: contains 0 or 1 (rw)
32 *
33 * bluetooth - Bluetooth subsystem state: contains 0 or 1 (rw)
34 *
35 * raw - raw value taken from embedded controller register (ro)
36 *
37 * In addition to these platform device attributes the driver
38 * registers itself in the Linux backlight control subsystem and is
39 * available to userspace under /sys/class/backlight/compal-laptop/.
40 *
41 * This driver might work on other laptops produced by Compal. If you
42 * want to try it you can pass force=1 as argument to the module which
43 * will force it to load even when the DMI data doesn't identify the
44 * laptop as FL9x.
45 */
46
47#include <linux/module.h>
48#include <linux/kernel.h>
49#include <linux/init.h>
50#include <linux/acpi.h>
51#include <linux/dmi.h>
52#include <linux/backlight.h>
53#include <linux/platform_device.h>
54#include <linux/autoconf.h>
55
56#define COMPAL_DRIVER_VERSION "0.2.6"
57
58#define COMPAL_LCD_LEVEL_MAX 8
59
60#define COMPAL_EC_COMMAND_WIRELESS 0xBB
61#define COMPAL_EC_COMMAND_LCD_LEVEL 0xB9
62
63#define KILLSWITCH_MASK 0x10
64#define WLAN_MASK 0x01
65#define BT_MASK 0x02
66
67static int force;
68module_param(force, bool, 0);
69MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
70
71/* Hardware access */
72
73static int set_lcd_level(int level)
74{
75 if (level < 0 || level >= COMPAL_LCD_LEVEL_MAX)
76 return -EINVAL;
77
78 ec_write(COMPAL_EC_COMMAND_LCD_LEVEL, level);
79
80 return 0;
81}
82
83static int get_lcd_level(void)
84{
85 u8 result;
86
87 ec_read(COMPAL_EC_COMMAND_LCD_LEVEL, &result);
88
89 return (int) result;
90}
91
92static int set_wlan_state(int state)
93{
94 u8 result, value;
95
96 ec_read(COMPAL_EC_COMMAND_WIRELESS, &result);
97
98 if ((result & KILLSWITCH_MASK) == 0)
99 return -EINVAL;
100 else {
101 if (state)
102 value = (u8) (result | WLAN_MASK);
103 else
104 value = (u8) (result & ~WLAN_MASK);
105 ec_write(COMPAL_EC_COMMAND_WIRELESS, value);
106 }
107
108 return 0;
109}
110
111static int set_bluetooth_state(int state)
112{
113 u8 result, value;
114
115 ec_read(COMPAL_EC_COMMAND_WIRELESS, &result);
116
117 if ((result & KILLSWITCH_MASK) == 0)
118 return -EINVAL;
119 else {
120 if (state)
121 value = (u8) (result | BT_MASK);
122 else
123 value = (u8) (result & ~BT_MASK);
124 ec_write(COMPAL_EC_COMMAND_WIRELESS, value);
125 }
126
127 return 0;
128}
129
130static int get_wireless_state(int *wlan, int *bluetooth)
131{
132 u8 result;
133
134 ec_read(COMPAL_EC_COMMAND_WIRELESS, &result);
135
136 if (wlan) {
137 if ((result & KILLSWITCH_MASK) == 0)
138 *wlan = 0;
139 else
140 *wlan = result & WLAN_MASK;
141 }
142
143 if (bluetooth) {
144 if ((result & KILLSWITCH_MASK) == 0)
145 *bluetooth = 0;
146 else
147 *bluetooth = (result & BT_MASK) >> 1;
148 }
149
150 return 0;
151}
152
153/* Backlight device stuff */
154
155static int bl_get_brightness(struct backlight_device *b)
156{
157 return get_lcd_level();
158}
159
160
161static int bl_update_status(struct backlight_device *b)
162{
163 return set_lcd_level(b->props.brightness);
164}
165
166static struct backlight_ops compalbl_ops = {
167 .get_brightness = bl_get_brightness,
168 .update_status = bl_update_status,
169};
170
171static struct backlight_device *compalbl_device;
172
173/* Platform device */
174
175static ssize_t show_wlan(struct device *dev,
176 struct device_attribute *attr, char *buf)
177{
178 int ret, enabled;
179
180 ret = get_wireless_state(&enabled, NULL);
181 if (ret < 0)
182 return ret;
183
184 return sprintf(buf, "%i\n", enabled);
185}
186
187static ssize_t show_raw(struct device *dev,
188 struct device_attribute *attr, char *buf)
189{
190 u8 result;
191
192 ec_read(COMPAL_EC_COMMAND_WIRELESS, &result);
193
194 return sprintf(buf, "%i\n", result);
195}
196
197static ssize_t show_bluetooth(struct device *dev,
198 struct device_attribute *attr, char *buf)
199{
200 int ret, enabled;
201
202 ret = get_wireless_state(NULL, &enabled);
203 if (ret < 0)
204 return ret;
205
206 return sprintf(buf, "%i\n", enabled);
207}
208
209static ssize_t store_wlan_state(struct device *dev,
210 struct device_attribute *attr, const char *buf, size_t count)
211{
212 int state, ret;
213
214 if (sscanf(buf, "%i", &state) != 1 || (state < 0 || state > 1))
215 return -EINVAL;
216
217 ret = set_wlan_state(state);
218 if (ret < 0)
219 return ret;
220
221 return count;
222}
223
224static ssize_t store_bluetooth_state(struct device *dev,
225 struct device_attribute *attr, const char *buf, size_t count)
226{
227 int state, ret;
228
229 if (sscanf(buf, "%i", &state) != 1 || (state < 0 || state > 1))
230 return -EINVAL;
231
232 ret = set_bluetooth_state(state);
233 if (ret < 0)
234 return ret;
235
236 return count;
237}
238
239static DEVICE_ATTR(bluetooth, 0644, show_bluetooth, store_bluetooth_state);
240static DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan_state);
241static DEVICE_ATTR(raw, 0444, show_raw, NULL);
242
243static struct attribute *compal_attributes[] = {
244 &dev_attr_bluetooth.attr,
245 &dev_attr_wlan.attr,
246 &dev_attr_raw.attr,
247 NULL
248};
249
250static struct attribute_group compal_attribute_group = {
251 .attrs = compal_attributes
252};
253
254static struct platform_driver compal_driver = {
255 .driver = {
256 .name = "compal-laptop",
257 .owner = THIS_MODULE,
258 }
259};
260
261static struct platform_device *compal_device;
262
263/* Initialization */
264
265static int dmi_check_cb(const struct dmi_system_id *id)
266{
267 printk(KERN_INFO "compal-laptop: Identified laptop model '%s'.\n",
268 id->ident);
269
270 return 0;
271}
272
273static struct dmi_system_id __initdata compal_dmi_table[] = {
274 {
275 .ident = "FL90/IFL90",
276 .matches = {
277 DMI_MATCH(DMI_BOARD_NAME, "IFL90"),
278 DMI_MATCH(DMI_BOARD_VERSION, "IFT00"),
279 },
280 .callback = dmi_check_cb
281 },
282 {
283 .ident = "FL90/IFL90",
284 .matches = {
285 DMI_MATCH(DMI_BOARD_NAME, "IFL90"),
286 DMI_MATCH(DMI_BOARD_VERSION, "REFERENCE"),
287 },
288 .callback = dmi_check_cb
289 },
290 {
291 .ident = "FL91/IFL91",
292 .matches = {
293 DMI_MATCH(DMI_BOARD_NAME, "IFL91"),
294 DMI_MATCH(DMI_BOARD_VERSION, "IFT00"),
295 },
296 .callback = dmi_check_cb
297 },
298 {
299 .ident = "FL92/JFL92",
300 .matches = {
301 DMI_MATCH(DMI_BOARD_NAME, "JFL92"),
302 DMI_MATCH(DMI_BOARD_VERSION, "IFT00"),
303 },
304 .callback = dmi_check_cb
305 },
306 {
307 .ident = "FT00/IFT00",
308 .matches = {
309 DMI_MATCH(DMI_BOARD_NAME, "IFT00"),
310 DMI_MATCH(DMI_BOARD_VERSION, "IFT00"),
311 },
312 .callback = dmi_check_cb
313 },
314 { }
315};
316
317static int __init compal_init(void)
318{
319 int ret;
320
321 if (acpi_disabled)
322 return -ENODEV;
323
324 if (!force && !dmi_check_system(compal_dmi_table))
325 return -ENODEV;
326
327 /* Register backlight stuff */
328
329 compalbl_device = backlight_device_register("compal-laptop", NULL, NULL,
330 &compalbl_ops);
331 if (IS_ERR(compalbl_device))
332 return PTR_ERR(compalbl_device);
333
334 compalbl_device->props.max_brightness = COMPAL_LCD_LEVEL_MAX-1;
335
336 ret = platform_driver_register(&compal_driver);
337 if (ret)
338 goto fail_backlight;
339
340 /* Register platform stuff */
341
342 compal_device = platform_device_alloc("compal-laptop", -1);
343 if (!compal_device) {
344 ret = -ENOMEM;
345 goto fail_platform_driver;
346 }
347
348 ret = platform_device_add(compal_device);
349 if (ret)
350 goto fail_platform_device1;
351
352 ret = sysfs_create_group(&compal_device->dev.kobj,
353 &compal_attribute_group);
354 if (ret)
355 goto fail_platform_device2;
356
357 printk(KERN_INFO "compal-laptop: driver "COMPAL_DRIVER_VERSION
358 " successfully loaded.\n");
359
360 return 0;
361
362fail_platform_device2:
363
364 platform_device_del(compal_device);
365
366fail_platform_device1:
367
368 platform_device_put(compal_device);
369
370fail_platform_driver:
371
372 platform_driver_unregister(&compal_driver);
373
374fail_backlight:
375
376 backlight_device_unregister(compalbl_device);
377
378 return ret;
379}
380
381static void __exit compal_cleanup(void)
382{
383
384 sysfs_remove_group(&compal_device->dev.kobj, &compal_attribute_group);
385 platform_device_unregister(compal_device);
386 platform_driver_unregister(&compal_driver);
387 backlight_device_unregister(compalbl_device);
388
389 printk(KERN_INFO "compal-laptop: driver unloaded.\n");
390}
391
392module_init(compal_init);
393module_exit(compal_cleanup);
394
395MODULE_AUTHOR("Cezary Jackiewicz");
396MODULE_DESCRIPTION("Compal Laptop Support");
397MODULE_VERSION(COMPAL_DRIVER_VERSION);
398MODULE_LICENSE("GPL");
399
400MODULE_ALIAS("dmi:*:rnIFL90:rvrIFT00:*");
401MODULE_ALIAS("dmi:*:rnIFL90:rvrREFERENCE:*");
402MODULE_ALIAS("dmi:*:rnIFL91:rvrIFT00:*");
403MODULE_ALIAS("dmi:*:rnJFL92:rvrIFT00:*");
404MODULE_ALIAS("dmi:*:rnIFT00:rvrIFT00:*");
diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c
index 6d727609097f..9e8d79e7e9f4 100644
--- a/drivers/misc/eeepc-laptop.c
+++ b/drivers/misc/eeepc-laptop.c
@@ -87,7 +87,7 @@ enum {
87 CM_ASL_LID 87 CM_ASL_LID
88}; 88};
89 89
90const char *cm_getv[] = { 90static const char *cm_getv[] = {
91 "WLDG", NULL, NULL, NULL, 91 "WLDG", NULL, NULL, NULL,
92 "CAMG", NULL, NULL, NULL, 92 "CAMG", NULL, NULL, NULL,
93 NULL, "PBLG", NULL, NULL, 93 NULL, "PBLG", NULL, NULL,
@@ -96,7 +96,7 @@ const char *cm_getv[] = {
96 "CRDG", "LIDG" 96 "CRDG", "LIDG"
97}; 97};
98 98
99const char *cm_setv[] = { 99static const char *cm_setv[] = {
100 "WLDS", NULL, NULL, NULL, 100 "WLDS", NULL, NULL, NULL,
101 "CAMS", NULL, NULL, NULL, 101 "CAMS", NULL, NULL, NULL,
102 "SDSP", "PBLS", "HDPS", NULL, 102 "SDSP", "PBLS", "HDPS", NULL,
diff --git a/drivers/misc/fujitsu-laptop.c b/drivers/misc/fujitsu-laptop.c
index 6d14e8fe1537..7a1ef6c262de 100644
--- a/drivers/misc/fujitsu-laptop.c
+++ b/drivers/misc/fujitsu-laptop.c
@@ -1,12 +1,14 @@
1/*-*-linux-c-*-*/ 1/*-*-linux-c-*-*/
2 2
3/* 3/*
4 Copyright (C) 2007 Jonathan Woithe <jwoithe@physics.adelaide.edu.au> 4 Copyright (C) 2007,2008 Jonathan Woithe <jwoithe@physics.adelaide.edu.au>
5 Copyright (C) 2008 Peter Gruber <nokos@gmx.net>
5 Based on earlier work: 6 Based on earlier work:
6 Copyright (C) 2003 Shane Spencer <shane@bogomip.com> 7 Copyright (C) 2003 Shane Spencer <shane@bogomip.com>
7 Adrian Yee <brewt-fujitsu@brewt.org> 8 Adrian Yee <brewt-fujitsu@brewt.org>
8 9
9 Templated from msi-laptop.c which is copyright by its respective authors. 10 Templated from msi-laptop.c and thinkpad_acpi.c which is copyright
11 by its respective authors.
10 12
11 This program is free software; you can redistribute it and/or modify 13 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by 14 it under the terms of the GNU General Public License as published by
@@ -39,8 +41,17 @@
39 * registers itself in the Linux backlight control subsystem and is 41 * registers itself in the Linux backlight control subsystem and is
40 * available to userspace under /sys/class/backlight/fujitsu-laptop/. 42 * available to userspace under /sys/class/backlight/fujitsu-laptop/.
41 * 43 *
42 * This driver has been tested on a Fujitsu Lifebook S7020. It should 44 * Hotkeys present on certain Fujitsu laptops (eg: the S6xxx series) are
43 * work on most P-series and S-series Lifebooks, but YMMV. 45 * also supported by this driver.
46 *
47 * This driver has been tested on a Fujitsu Lifebook S6410 and S7020. It
48 * should work on most P-series and S-series Lifebooks, but YMMV.
49 *
50 * The module parameter use_alt_lcd_levels switches between different ACPI
51 * brightness controls which are used by different Fujitsu laptops. In most
52 * cases the correct method is automatically detected. "use_alt_lcd_levels=1"
53 * is applicable for a Fujitsu Lifebook S6410 if autodetection fails.
54 *
44 */ 55 */
45 56
46#include <linux/module.h> 57#include <linux/module.h>
@@ -49,30 +60,105 @@
49#include <linux/acpi.h> 60#include <linux/acpi.h>
50#include <linux/dmi.h> 61#include <linux/dmi.h>
51#include <linux/backlight.h> 62#include <linux/backlight.h>
63#include <linux/input.h>
64#include <linux/kfifo.h>
65#include <linux/video_output.h>
52#include <linux/platform_device.h> 66#include <linux/platform_device.h>
53 67
54#define FUJITSU_DRIVER_VERSION "0.3" 68#define FUJITSU_DRIVER_VERSION "0.4.2"
55 69
56#define FUJITSU_LCD_N_LEVELS 8 70#define FUJITSU_LCD_N_LEVELS 8
57 71
58#define ACPI_FUJITSU_CLASS "fujitsu" 72#define ACPI_FUJITSU_CLASS "fujitsu"
59#define ACPI_FUJITSU_HID "FUJ02B1" 73#define ACPI_FUJITSU_HID "FUJ02B1"
60#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI extras driver" 74#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver"
61#define ACPI_FUJITSU_DEVICE_NAME "Fujitsu FUJ02B1" 75#define ACPI_FUJITSU_DEVICE_NAME "Fujitsu FUJ02B1"
62 76#define ACPI_FUJITSU_HOTKEY_HID "FUJ02E3"
77#define ACPI_FUJITSU_HOTKEY_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
78#define ACPI_FUJITSU_HOTKEY_DEVICE_NAME "Fujitsu FUJ02E3"
79
80#define ACPI_FUJITSU_NOTIFY_CODE1 0x80
81
82#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86
83#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87
84
85/* Hotkey details */
86#define LOCK_KEY 0x410 /* codes for the keys in the GIRB register */
87#define DISPLAY_KEY 0x411 /* keys are mapped to KEY_SCREENLOCK (the key with the key symbol) */
88#define ENERGY_KEY 0x412 /* KEY_MEDIA (the key with the laptop symbol, KEY_EMAIL (E key)) */
89#define REST_KEY 0x413 /* KEY_SUSPEND (R key) */
90
91#define MAX_HOTKEY_RINGBUFFER_SIZE 100
92#define RINGBUFFERSIZE 40
93
94/* Debugging */
95#define FUJLAPTOP_LOG ACPI_FUJITSU_HID ": "
96#define FUJLAPTOP_ERR KERN_ERR FUJLAPTOP_LOG
97#define FUJLAPTOP_NOTICE KERN_NOTICE FUJLAPTOP_LOG
98#define FUJLAPTOP_INFO KERN_INFO FUJLAPTOP_LOG
99#define FUJLAPTOP_DEBUG KERN_DEBUG FUJLAPTOP_LOG
100
101#define FUJLAPTOP_DBG_ALL 0xffff
102#define FUJLAPTOP_DBG_ERROR 0x0001
103#define FUJLAPTOP_DBG_WARN 0x0002
104#define FUJLAPTOP_DBG_INFO 0x0004
105#define FUJLAPTOP_DBG_TRACE 0x0008
106
107#define dbg_printk(a_dbg_level, format, arg...) \
108 do { if (dbg_level & a_dbg_level) \
109 printk(FUJLAPTOP_DEBUG "%s: " format, __func__ , ## arg); \
110 } while (0)
111#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
112#define vdbg_printk(a_dbg_level, format, arg...) \
113 dbg_printk(a_dbg_level, format, ## arg)
114#else
115#define vdbg_printk(a_dbg_level, format, arg...)
116#endif
117
118/* Device controlling the backlight and associated keys */
63struct fujitsu_t { 119struct fujitsu_t {
64 acpi_handle acpi_handle; 120 acpi_handle acpi_handle;
121 struct acpi_device *dev;
122 struct input_dev *input;
123 char phys[32];
65 struct backlight_device *bl_device; 124 struct backlight_device *bl_device;
66 struct platform_device *pf_device; 125 struct platform_device *pf_device;
67 126
68 unsigned long fuj02b1_state; 127 unsigned int max_brightness;
69 unsigned int brightness_changed; 128 unsigned int brightness_changed;
70 unsigned int brightness_level; 129 unsigned int brightness_level;
71}; 130};
72 131
73static struct fujitsu_t *fujitsu; 132static struct fujitsu_t *fujitsu;
133static int use_alt_lcd_levels = -1;
134static int disable_brightness_keys = -1;
135static int disable_brightness_adjust = -1;
136
137/* Device used to access other hotkeys on the laptop */
138struct fujitsu_hotkey_t {
139 acpi_handle acpi_handle;
140 struct acpi_device *dev;
141 struct input_dev *input;
142 char phys[32];
143 struct platform_device *pf_device;
144 struct kfifo *fifo;
145 spinlock_t fifo_lock;
146
147 unsigned int irb; /* info about the pressed buttons */
148};
74 149
75/* Hardware access */ 150static struct fujitsu_hotkey_t *fujitsu_hotkey;
151
152static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
153 void *data);
154
155#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
156static u32 dbg_level = 0x03;
157#endif
158
159static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data);
160
161/* Hardware access for LCD brightness control */
76 162
77static int set_lcd_level(int level) 163static int set_lcd_level(int level)
78{ 164{
@@ -81,7 +167,10 @@ static int set_lcd_level(int level)
81 struct acpi_object_list arg_list = { 1, &arg0 }; 167 struct acpi_object_list arg_list = { 1, &arg0 };
82 acpi_handle handle = NULL; 168 acpi_handle handle = NULL;
83 169
84 if (level < 0 || level >= FUJITSU_LCD_N_LEVELS) 170 vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
171 level);
172
173 if (level < 0 || level >= fujitsu->max_brightness)
85 return -EINVAL; 174 return -EINVAL;
86 175
87 if (!fujitsu) 176 if (!fujitsu)
@@ -89,7 +178,38 @@ static int set_lcd_level(int level)
89 178
90 status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle); 179 status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle);
91 if (ACPI_FAILURE(status)) { 180 if (ACPI_FAILURE(status)) {
92 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "SBLL not present\n")); 181 vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n");
182 return -ENODEV;
183 }
184
185 arg0.integer.value = level;
186
187 status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
188 if (ACPI_FAILURE(status))
189 return -ENODEV;
190
191 return 0;
192}
193
194static int set_lcd_level_alt(int level)
195{
196 acpi_status status = AE_OK;
197 union acpi_object arg0 = { ACPI_TYPE_INTEGER };
198 struct acpi_object_list arg_list = { 1, &arg0 };
199 acpi_handle handle = NULL;
200
201 vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
202 level);
203
204 if (level < 0 || level >= fujitsu->max_brightness)
205 return -EINVAL;
206
207 if (!fujitsu)
208 return -EINVAL;
209
210 status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle);
211 if (ACPI_FAILURE(status)) {
212 vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n");
93 return -ENODEV; 213 return -ENODEV;
94 } 214 }
95 215
@@ -107,13 +227,52 @@ static int get_lcd_level(void)
107 unsigned long state = 0; 227 unsigned long state = 0;
108 acpi_status status = AE_OK; 228 acpi_status status = AE_OK;
109 229
110 // Get the Brightness 230 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n");
231
111 status = 232 status =
112 acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state); 233 acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state);
113 if (status < 0) 234 if (status < 0)
114 return status; 235 return status;
115 236
116 fujitsu->fuj02b1_state = state; 237 fujitsu->brightness_level = state & 0x0fffffff;
238
239 if (state & 0x80000000)
240 fujitsu->brightness_changed = 1;
241 else
242 fujitsu->brightness_changed = 0;
243
244 return fujitsu->brightness_level;
245}
246
247static int get_max_brightness(void)
248{
249 unsigned long state = 0;
250 acpi_status status = AE_OK;
251
252 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n");
253
254 status =
255 acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state);
256 if (status < 0)
257 return status;
258
259 fujitsu->max_brightness = state;
260
261 return fujitsu->max_brightness;
262}
263
264static int get_lcd_level_alt(void)
265{
266 unsigned long state = 0;
267 acpi_status status = AE_OK;
268
269 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLS\n");
270
271 status =
272 acpi_evaluate_integer(fujitsu->acpi_handle, "GBLS", NULL, &state);
273 if (status < 0)
274 return status;
275
117 fujitsu->brightness_level = state & 0x0fffffff; 276 fujitsu->brightness_level = state & 0x0fffffff;
118 277
119 if (state & 0x80000000) 278 if (state & 0x80000000)
@@ -128,12 +287,18 @@ static int get_lcd_level(void)
128 287
129static int bl_get_brightness(struct backlight_device *b) 288static int bl_get_brightness(struct backlight_device *b)
130{ 289{
131 return get_lcd_level(); 290 if (use_alt_lcd_levels)
291 return get_lcd_level_alt();
292 else
293 return get_lcd_level();
132} 294}
133 295
134static int bl_update_status(struct backlight_device *b) 296static int bl_update_status(struct backlight_device *b)
135{ 297{
136 return set_lcd_level(b->props.brightness); 298 if (use_alt_lcd_levels)
299 return set_lcd_level_alt(b->props.brightness);
300 else
301 return set_lcd_level(b->props.brightness);
137} 302}
138 303
139static struct backlight_ops fujitsubl_ops = { 304static struct backlight_ops fujitsubl_ops = {
@@ -141,7 +306,35 @@ static struct backlight_ops fujitsubl_ops = {
141 .update_status = bl_update_status, 306 .update_status = bl_update_status,
142}; 307};
143 308
144/* Platform device */ 309/* Platform LCD brightness device */
310
311static ssize_t
312show_max_brightness(struct device *dev,
313 struct device_attribute *attr, char *buf)
314{
315
316 int ret;
317
318 ret = get_max_brightness();
319 if (ret < 0)
320 return ret;
321
322 return sprintf(buf, "%i\n", ret);
323}
324
325static ssize_t
326show_brightness_changed(struct device *dev,
327 struct device_attribute *attr, char *buf)
328{
329
330 int ret;
331
332 ret = fujitsu->brightness_changed;
333 if (ret < 0)
334 return ret;
335
336 return sprintf(buf, "%i\n", ret);
337}
145 338
146static ssize_t show_lcd_level(struct device *dev, 339static ssize_t show_lcd_level(struct device *dev,
147 struct device_attribute *attr, char *buf) 340 struct device_attribute *attr, char *buf)
@@ -149,7 +342,10 @@ static ssize_t show_lcd_level(struct device *dev,
149 342
150 int ret; 343 int ret;
151 344
152 ret = get_lcd_level(); 345 if (use_alt_lcd_levels)
346 ret = get_lcd_level_alt();
347 else
348 ret = get_lcd_level();
153 if (ret < 0) 349 if (ret < 0)
154 return ret; 350 return ret;
155 351
@@ -164,19 +360,61 @@ static ssize_t store_lcd_level(struct device *dev,
164 int level, ret; 360 int level, ret;
165 361
166 if (sscanf(buf, "%i", &level) != 1 362 if (sscanf(buf, "%i", &level) != 1
167 || (level < 0 || level >= FUJITSU_LCD_N_LEVELS)) 363 || (level < 0 || level >= fujitsu->max_brightness))
168 return -EINVAL; 364 return -EINVAL;
169 365
170 ret = set_lcd_level(level); 366 if (use_alt_lcd_levels)
367 ret = set_lcd_level_alt(level);
368 else
369 ret = set_lcd_level(level);
370 if (ret < 0)
371 return ret;
372
373 if (use_alt_lcd_levels)
374 ret = get_lcd_level_alt();
375 else
376 ret = get_lcd_level();
171 if (ret < 0) 377 if (ret < 0)
172 return ret; 378 return ret;
173 379
174 return count; 380 return count;
175} 381}
176 382
383/* Hardware access for hotkey device */
384
385static int get_irb(void)
386{
387 unsigned long state = 0;
388 acpi_status status = AE_OK;
389
390 vdbg_printk(FUJLAPTOP_DBG_TRACE, "Get irb\n");
391
392 status =
393 acpi_evaluate_integer(fujitsu_hotkey->acpi_handle, "GIRB", NULL,
394 &state);
395 if (status < 0)
396 return status;
397
398 fujitsu_hotkey->irb = state;
399
400 return fujitsu_hotkey->irb;
401}
402
403static ssize_t
404ignore_store(struct device *dev,
405 struct device_attribute *attr, const char *buf, size_t count)
406{
407 return count;
408}
409
410static DEVICE_ATTR(max_brightness, 0444, show_max_brightness, ignore_store);
411static DEVICE_ATTR(brightness_changed, 0444, show_brightness_changed,
412 ignore_store);
177static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level); 413static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level);
178 414
179static struct attribute *fujitsupf_attributes[] = { 415static struct attribute *fujitsupf_attributes[] = {
416 &dev_attr_brightness_changed.attr,
417 &dev_attr_max_brightness.attr,
180 &dev_attr_lcd_level.attr, 418 &dev_attr_lcd_level.attr,
181 NULL 419 NULL
182}; 420};
@@ -192,14 +430,52 @@ static struct platform_driver fujitsupf_driver = {
192 } 430 }
193}; 431};
194 432
195/* ACPI device */ 433static int dmi_check_cb_s6410(const struct dmi_system_id *id)
434{
435 acpi_handle handle;
436 int have_blnf;
437 printk(KERN_INFO "fujitsu-laptop: Identified laptop model '%s'.\n",
438 id->ident);
439 have_blnf = ACPI_SUCCESS
440 (acpi_get_handle(NULL, "\\_SB.PCI0.GFX0.LCD.BLNF", &handle));
441 if (use_alt_lcd_levels == -1) {
442 vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detecting usealt\n");
443 use_alt_lcd_levels = 1;
444 }
445 if (disable_brightness_keys == -1) {
446 vdbg_printk(FUJLAPTOP_DBG_TRACE,
447 "auto-detecting disable_keys\n");
448 disable_brightness_keys = have_blnf ? 1 : 0;
449 }
450 if (disable_brightness_adjust == -1) {
451 vdbg_printk(FUJLAPTOP_DBG_TRACE,
452 "auto-detecting disable_adjust\n");
453 disable_brightness_adjust = have_blnf ? 0 : 1;
454 }
455 return 0;
456}
457
458static struct dmi_system_id __initdata fujitsu_dmi_table[] = {
459 {
460 .ident = "Fujitsu Siemens",
461 .matches = {
462 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
463 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"),
464 },
465 .callback = dmi_check_cb_s6410},
466 {}
467};
468
469/* ACPI device for LCD brightness control */
196 470
197static int acpi_fujitsu_add(struct acpi_device *device) 471static int acpi_fujitsu_add(struct acpi_device *device)
198{ 472{
473 acpi_status status;
474 acpi_handle handle;
199 int result = 0; 475 int result = 0;
200 int state = 0; 476 int state = 0;
201 477 struct input_dev *input;
202 ACPI_FUNCTION_TRACE("acpi_fujitsu_add"); 478 int error;
203 479
204 if (!device) 480 if (!device)
205 return -EINVAL; 481 return -EINVAL;
@@ -209,10 +485,42 @@ static int acpi_fujitsu_add(struct acpi_device *device)
209 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); 485 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
210 acpi_driver_data(device) = fujitsu; 486 acpi_driver_data(device) = fujitsu;
211 487
488 status = acpi_install_notify_handler(device->handle,
489 ACPI_DEVICE_NOTIFY,
490 acpi_fujitsu_notify, fujitsu);
491
492 if (ACPI_FAILURE(status)) {
493 printk(KERN_ERR "Error installing notify handler\n");
494 error = -ENODEV;
495 goto err_stop;
496 }
497
498 fujitsu->input = input = input_allocate_device();
499 if (!input) {
500 error = -ENOMEM;
501 goto err_uninstall_notify;
502 }
503
504 snprintf(fujitsu->phys, sizeof(fujitsu->phys),
505 "%s/video/input0", acpi_device_hid(device));
506
507 input->name = acpi_device_name(device);
508 input->phys = fujitsu->phys;
509 input->id.bustype = BUS_HOST;
510 input->id.product = 0x06;
511 input->dev.parent = &device->dev;
512 input->evbit[0] = BIT(EV_KEY);
513 set_bit(KEY_BRIGHTNESSUP, input->keybit);
514 set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
515 set_bit(KEY_UNKNOWN, input->keybit);
516
517 error = input_register_device(input);
518 if (error)
519 goto err_free_input_dev;
520
212 result = acpi_bus_get_power(fujitsu->acpi_handle, &state); 521 result = acpi_bus_get_power(fujitsu->acpi_handle, &state);
213 if (result) { 522 if (result) {
214 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 523 printk(KERN_ERR "Error reading power state\n");
215 "Error reading power state\n"));
216 goto end; 524 goto end;
217 } 525 }
218 526
@@ -220,22 +528,373 @@ static int acpi_fujitsu_add(struct acpi_device *device)
220 acpi_device_name(device), acpi_device_bid(device), 528 acpi_device_name(device), acpi_device_bid(device),
221 !device->power.state ? "on" : "off"); 529 !device->power.state ? "on" : "off");
222 530
223 end: 531 fujitsu->dev = device;
532
533 if (ACPI_SUCCESS
534 (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
535 vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
536 if (ACPI_FAILURE
537 (acpi_evaluate_object
538 (device->handle, METHOD_NAME__INI, NULL, NULL)))
539 printk(KERN_ERR "_INI Method failed\n");
540 }
541
542 /* do config (detect defaults) */
543 dmi_check_system(fujitsu_dmi_table);
544 use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0;
545 disable_brightness_keys = disable_brightness_keys == 1 ? 1 : 0;
546 disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0;
547 vdbg_printk(FUJLAPTOP_DBG_INFO,
548 "config: [alt interface: %d], [key disable: %d], [adjust disable: %d]\n",
549 use_alt_lcd_levels, disable_brightness_keys,
550 disable_brightness_adjust);
551
552 if (get_max_brightness() <= 0)
553 fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS;
554 if (use_alt_lcd_levels)
555 get_lcd_level_alt();
556 else
557 get_lcd_level();
558
559 return result;
560
561end:
562err_free_input_dev:
563 input_free_device(input);
564err_uninstall_notify:
565 acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
566 acpi_fujitsu_notify);
567err_stop:
224 568
225 return result; 569 return result;
226} 570}
227 571
228static int acpi_fujitsu_remove(struct acpi_device *device, int type) 572static int acpi_fujitsu_remove(struct acpi_device *device, int type)
229{ 573{
230 ACPI_FUNCTION_TRACE("acpi_fujitsu_remove"); 574 acpi_status status;
575 struct fujitsu_t *fujitsu = NULL;
231 576
232 if (!device || !acpi_driver_data(device)) 577 if (!device || !acpi_driver_data(device))
233 return -EINVAL; 578 return -EINVAL;
579
580 fujitsu = acpi_driver_data(device);
581
582 status = acpi_remove_notify_handler(fujitsu->acpi_handle,
583 ACPI_DEVICE_NOTIFY,
584 acpi_fujitsu_notify);
585
586 if (!device || !acpi_driver_data(device))
587 return -EINVAL;
588
234 fujitsu->acpi_handle = NULL; 589 fujitsu->acpi_handle = NULL;
235 590
236 return 0; 591 return 0;
237} 592}
238 593
594/* Brightness notify */
595
596static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data)
597{
598 struct input_dev *input;
599 int keycode;
600 int oldb, newb;
601
602 input = fujitsu->input;
603
604 switch (event) {
605 case ACPI_FUJITSU_NOTIFY_CODE1:
606 keycode = 0;
607 oldb = fujitsu->brightness_level;
608 get_lcd_level(); /* the alt version always yields changed */
609 newb = fujitsu->brightness_level;
610
611 vdbg_printk(FUJLAPTOP_DBG_TRACE,
612 "brightness button event [%i -> %i (%i)]\n",
613 oldb, newb, fujitsu->brightness_changed);
614
615 if (oldb == newb && fujitsu->brightness_changed) {
616 keycode = 0;
617 if (disable_brightness_keys != 1) {
618 if (oldb == 0) {
619 acpi_bus_generate_proc_event(fujitsu->
620 dev,
621 ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS,
622 0);
623 keycode = KEY_BRIGHTNESSDOWN;
624 } else if (oldb ==
625 (fujitsu->max_brightness) - 1) {
626 acpi_bus_generate_proc_event(fujitsu->
627 dev,
628 ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS,
629 0);
630 keycode = KEY_BRIGHTNESSUP;
631 }
632 }
633 } else if (oldb < newb) {
634 if (disable_brightness_adjust != 1) {
635 if (use_alt_lcd_levels)
636 set_lcd_level_alt(newb);
637 else
638 set_lcd_level(newb);
639 }
640 if (disable_brightness_keys != 1) {
641 acpi_bus_generate_proc_event(fujitsu->dev,
642 ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS,
643 0);
644 keycode = KEY_BRIGHTNESSUP;
645 }
646 } else if (oldb > newb) {
647 if (disable_brightness_adjust != 1) {
648 if (use_alt_lcd_levels)
649 set_lcd_level_alt(newb);
650 else
651 set_lcd_level(newb);
652 }
653 if (disable_brightness_keys != 1) {
654 acpi_bus_generate_proc_event(fujitsu->dev,
655 ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS,
656 0);
657 keycode = KEY_BRIGHTNESSDOWN;
658 }
659 } else {
660 keycode = KEY_UNKNOWN;
661 }
662 break;
663 default:
664 keycode = KEY_UNKNOWN;
665 vdbg_printk(FUJLAPTOP_DBG_WARN,
666 "unsupported event [0x%x]\n", event);
667 break;
668 }
669
670 if (keycode != 0) {
671 input_report_key(input, keycode, 1);
672 input_sync(input);
673 input_report_key(input, keycode, 0);
674 input_sync(input);
675 }
676
677 return;
678}
679
680/* ACPI device for hotkey handling */
681
682static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
683{
684 acpi_status status;
685 acpi_handle handle;
686 int result = 0;
687 int state = 0;
688 struct input_dev *input;
689 int error;
690 int i;
691
692 if (!device)
693 return -EINVAL;
694
695 fujitsu_hotkey->acpi_handle = device->handle;
696 sprintf(acpi_device_name(device), "%s",
697 ACPI_FUJITSU_HOTKEY_DEVICE_NAME);
698 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
699 acpi_driver_data(device) = fujitsu_hotkey;
700
701 status = acpi_install_notify_handler(device->handle,
702 ACPI_DEVICE_NOTIFY,
703 acpi_fujitsu_hotkey_notify,
704 fujitsu_hotkey);
705
706 if (ACPI_FAILURE(status)) {
707 printk(KERN_ERR "Error installing notify handler\n");
708 error = -ENODEV;
709 goto err_stop;
710 }
711
712 /* kfifo */
713 spin_lock_init(&fujitsu_hotkey->fifo_lock);
714 fujitsu_hotkey->fifo =
715 kfifo_alloc(RINGBUFFERSIZE * sizeof(int), GFP_KERNEL,
716 &fujitsu_hotkey->fifo_lock);
717 if (IS_ERR(fujitsu_hotkey->fifo)) {
718 printk(KERN_ERR "kfifo_alloc failed\n");
719 error = PTR_ERR(fujitsu_hotkey->fifo);
720 goto err_stop;
721 }
722
723 fujitsu_hotkey->input = input = input_allocate_device();
724 if (!input) {
725 error = -ENOMEM;
726 goto err_uninstall_notify;
727 }
728
729 snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys),
730 "%s/video/input0", acpi_device_hid(device));
731
732 input->name = acpi_device_name(device);
733 input->phys = fujitsu_hotkey->phys;
734 input->id.bustype = BUS_HOST;
735 input->id.product = 0x06;
736 input->dev.parent = &device->dev;
737 input->evbit[0] = BIT(EV_KEY);
738 set_bit(KEY_SCREENLOCK, input->keybit);
739 set_bit(KEY_MEDIA, input->keybit);
740 set_bit(KEY_EMAIL, input->keybit);
741 set_bit(KEY_SUSPEND, input->keybit);
742 set_bit(KEY_UNKNOWN, input->keybit);
743
744 error = input_register_device(input);
745 if (error)
746 goto err_free_input_dev;
747
748 result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state);
749 if (result) {
750 printk(KERN_ERR "Error reading power state\n");
751 goto end;
752 }
753
754 printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
755 acpi_device_name(device), acpi_device_bid(device),
756 !device->power.state ? "on" : "off");
757
758 fujitsu_hotkey->dev = device;
759
760 if (ACPI_SUCCESS
761 (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
762 vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
763 if (ACPI_FAILURE
764 (acpi_evaluate_object
765 (device->handle, METHOD_NAME__INI, NULL, NULL)))
766 printk(KERN_ERR "_INI Method failed\n");
767 }
768
769 i = 0; /* Discard hotkey ringbuffer */
770 while (get_irb() != 0 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) ;
771 vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i);
772
773 return result;
774
775end:
776err_free_input_dev:
777 input_free_device(input);
778err_uninstall_notify:
779 acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
780 acpi_fujitsu_hotkey_notify);
781 kfifo_free(fujitsu_hotkey->fifo);
782err_stop:
783
784 return result;
785}
786
787static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type)
788{
789 acpi_status status;
790 struct fujitsu_hotkey_t *fujitsu_hotkey = NULL;
791
792 if (!device || !acpi_driver_data(device))
793 return -EINVAL;
794
795 fujitsu_hotkey = acpi_driver_data(device);
796
797 status = acpi_remove_notify_handler(fujitsu_hotkey->acpi_handle,
798 ACPI_DEVICE_NOTIFY,
799 acpi_fujitsu_hotkey_notify);
800
801 fujitsu_hotkey->acpi_handle = NULL;
802
803 kfifo_free(fujitsu_hotkey->fifo);
804
805 return 0;
806}
807
808static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
809 void *data)
810{
811 struct input_dev *input;
812 int keycode, keycode_r;
813 unsigned int irb = 1;
814 int i, status;
815
816 input = fujitsu_hotkey->input;
817
818 vdbg_printk(FUJLAPTOP_DBG_TRACE, "Hotkey event\n");
819
820 switch (event) {
821 case ACPI_FUJITSU_NOTIFY_CODE1:
822 i = 0;
823 while ((irb = get_irb()) != 0
824 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) {
825 vdbg_printk(FUJLAPTOP_DBG_TRACE, "GIRB result [%x]\n",
826 irb);
827
828 switch (irb & 0x4ff) {
829 case LOCK_KEY:
830 keycode = KEY_SCREENLOCK;
831 break;
832 case DISPLAY_KEY:
833 keycode = KEY_MEDIA;
834 break;
835 case ENERGY_KEY:
836 keycode = KEY_EMAIL;
837 break;
838 case REST_KEY:
839 keycode = KEY_SUSPEND;
840 break;
841 case 0:
842 keycode = 0;
843 break;
844 default:
845 vdbg_printk(FUJLAPTOP_DBG_WARN,
846 "Unknown GIRB result [%x]\n", irb);
847 keycode = -1;
848 break;
849 }
850 if (keycode > 0) {
851 vdbg_printk(FUJLAPTOP_DBG_TRACE,
852 "Push keycode into ringbuffer [%d]\n",
853 keycode);
854 status = kfifo_put(fujitsu_hotkey->fifo,
855 (unsigned char *)&keycode,
856 sizeof(keycode));
857 if (status != sizeof(keycode)) {
858 vdbg_printk(FUJLAPTOP_DBG_WARN,
859 "Could not push keycode [0x%x]\n",
860 keycode);
861 } else {
862 input_report_key(input, keycode, 1);
863 input_sync(input);
864 }
865 } else if (keycode == 0) {
866 while ((status =
867 kfifo_get
868 (fujitsu_hotkey->fifo, (unsigned char *)
869 &keycode_r,
870 sizeof
871 (keycode_r))) == sizeof(keycode_r)) {
872 input_report_key(input, keycode_r, 0);
873 input_sync(input);
874 vdbg_printk(FUJLAPTOP_DBG_TRACE,
875 "Pop keycode from ringbuffer [%d]\n",
876 keycode_r);
877 }
878 }
879 }
880
881 break;
882 default:
883 keycode = KEY_UNKNOWN;
884 vdbg_printk(FUJLAPTOP_DBG_WARN,
885 "Unsupported event [0x%x]\n", event);
886 input_report_key(input, keycode, 1);
887 input_sync(input);
888 input_report_key(input, keycode, 0);
889 input_sync(input);
890 break;
891 }
892
893 return;
894}
895
896/* Initialization */
897
239static const struct acpi_device_id fujitsu_device_ids[] = { 898static const struct acpi_device_id fujitsu_device_ids[] = {
240 {ACPI_FUJITSU_HID, 0}, 899 {ACPI_FUJITSU_HID, 0},
241 {"", 0}, 900 {"", 0},
@@ -251,11 +910,24 @@ static struct acpi_driver acpi_fujitsu_driver = {
251 }, 910 },
252}; 911};
253 912
254/* Initialization */ 913static const struct acpi_device_id fujitsu_hotkey_device_ids[] = {
914 {ACPI_FUJITSU_HOTKEY_HID, 0},
915 {"", 0},
916};
917
918static struct acpi_driver acpi_fujitsu_hotkey_driver = {
919 .name = ACPI_FUJITSU_HOTKEY_DRIVER_NAME,
920 .class = ACPI_FUJITSU_CLASS,
921 .ids = fujitsu_hotkey_device_ids,
922 .ops = {
923 .add = acpi_fujitsu_hotkey_add,
924 .remove = acpi_fujitsu_hotkey_remove,
925 },
926};
255 927
256static int __init fujitsu_init(void) 928static int __init fujitsu_init(void)
257{ 929{
258 int ret, result; 930 int ret, result, max_brightness;
259 931
260 if (acpi_disabled) 932 if (acpi_disabled)
261 return -ENODEV; 933 return -ENODEV;
@@ -271,19 +943,6 @@ static int __init fujitsu_init(void)
271 goto fail_acpi; 943 goto fail_acpi;
272 } 944 }
273 945
274 /* Register backlight stuff */
275
276 fujitsu->bl_device =
277 backlight_device_register("fujitsu-laptop", NULL, NULL,
278 &fujitsubl_ops);
279 if (IS_ERR(fujitsu->bl_device))
280 return PTR_ERR(fujitsu->bl_device);
281
282 fujitsu->bl_device->props.max_brightness = FUJITSU_LCD_N_LEVELS - 1;
283 ret = platform_driver_register(&fujitsupf_driver);
284 if (ret)
285 goto fail_backlight;
286
287 /* Register platform stuff */ 946 /* Register platform stuff */
288 947
289 fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1); 948 fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1);
@@ -302,28 +961,68 @@ static int __init fujitsu_init(void)
302 if (ret) 961 if (ret)
303 goto fail_platform_device2; 962 goto fail_platform_device2;
304 963
964 /* Register backlight stuff */
965
966 fujitsu->bl_device =
967 backlight_device_register("fujitsu-laptop", NULL, NULL,
968 &fujitsubl_ops);
969 if (IS_ERR(fujitsu->bl_device))
970 return PTR_ERR(fujitsu->bl_device);
971
972 max_brightness = fujitsu->max_brightness;
973
974 fujitsu->bl_device->props.max_brightness = max_brightness - 1;
975 fujitsu->bl_device->props.brightness = fujitsu->brightness_level;
976
977 ret = platform_driver_register(&fujitsupf_driver);
978 if (ret)
979 goto fail_backlight;
980
981 /* Register hotkey driver */
982
983 fujitsu_hotkey = kmalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL);
984 if (!fujitsu_hotkey) {
985 ret = -ENOMEM;
986 goto fail_hotkey;
987 }
988 memset(fujitsu_hotkey, 0, sizeof(struct fujitsu_hotkey_t));
989
990 result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver);
991 if (result < 0) {
992 ret = -ENODEV;
993 goto fail_hotkey1;
994 }
995
305 printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION 996 printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION
306 " successfully loaded.\n"); 997 " successfully loaded.\n");
307 998
308 return 0; 999 return 0;
309 1000
310 fail_platform_device2: 1001fail_hotkey1:
311 1002
312 platform_device_del(fujitsu->pf_device); 1003 kfree(fujitsu_hotkey);
313
314 fail_platform_device1:
315
316 platform_device_put(fujitsu->pf_device);
317 1004
318 fail_platform_driver: 1005fail_hotkey:
319 1006
320 platform_driver_unregister(&fujitsupf_driver); 1007 platform_driver_unregister(&fujitsupf_driver);
321 1008
322 fail_backlight: 1009fail_backlight:
323 1010
324 backlight_device_unregister(fujitsu->bl_device); 1011 backlight_device_unregister(fujitsu->bl_device);
325 1012
326 fail_acpi: 1013fail_platform_device2:
1014
1015 platform_device_del(fujitsu->pf_device);
1016
1017fail_platform_device1:
1018
1019 platform_device_put(fujitsu->pf_device);
1020
1021fail_platform_driver:
1022
1023 acpi_bus_unregister_driver(&acpi_fujitsu_driver);
1024
1025fail_acpi:
327 1026
328 kfree(fujitsu); 1027 kfree(fujitsu);
329 1028
@@ -342,19 +1041,43 @@ static void __exit fujitsu_cleanup(void)
342 1041
343 kfree(fujitsu); 1042 kfree(fujitsu);
344 1043
1044 acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver);
1045
1046 kfree(fujitsu_hotkey);
1047
345 printk(KERN_INFO "fujitsu-laptop: driver unloaded.\n"); 1048 printk(KERN_INFO "fujitsu-laptop: driver unloaded.\n");
346} 1049}
347 1050
348module_init(fujitsu_init); 1051module_init(fujitsu_init);
349module_exit(fujitsu_cleanup); 1052module_exit(fujitsu_cleanup);
350 1053
351MODULE_AUTHOR("Jonathan Woithe"); 1054module_param(use_alt_lcd_levels, uint, 0644);
1055MODULE_PARM_DESC(use_alt_lcd_levels,
1056 "Use alternative interface for lcd_levels (needed for Lifebook s6410).");
1057module_param(disable_brightness_keys, uint, 0644);
1058MODULE_PARM_DESC(disable_brightness_keys,
1059 "Disable brightness keys (eg. if they are already handled by the generic ACPI_VIDEO device).");
1060module_param(disable_brightness_adjust, uint, 0644);
1061MODULE_PARM_DESC(disable_brightness_adjust, "Disable brightness adjustment .");
1062#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
1063module_param_named(debug, dbg_level, uint, 0644);
1064MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
1065#endif
1066
1067MODULE_AUTHOR("Jonathan Woithe, Peter Gruber");
352MODULE_DESCRIPTION("Fujitsu laptop extras support"); 1068MODULE_DESCRIPTION("Fujitsu laptop extras support");
353MODULE_VERSION(FUJITSU_DRIVER_VERSION); 1069MODULE_VERSION(FUJITSU_DRIVER_VERSION);
354MODULE_LICENSE("GPL"); 1070MODULE_LICENSE("GPL");
355 1071
1072MODULE_ALIAS
1073 ("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
1074MODULE_ALIAS
1075 ("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
1076
356static struct pnp_device_id pnp_ids[] = { 1077static struct pnp_device_id pnp_ids[] = {
357 { .id = "FUJ02bf" }, 1078 { .id = "FUJ02bf" },
1079 { .id = "FUJ02B1" },
1080 { .id = "FUJ02E3" },
358 { .id = "" } 1081 { .id = "" }
359}; 1082};
360MODULE_DEVICE_TABLE(pnp, pnp_ids); 1083MODULE_DEVICE_TABLE(pnp, pnp_ids);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f9ad960d7c1a..66e5a5487c20 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2,7 +2,7 @@
2 * Block driver for media (i.e., flash cards) 2 * Block driver for media (i.e., flash cards)
3 * 3 *
4 * Copyright 2002 Hewlett-Packard Company 4 * Copyright 2002 Hewlett-Packard Company
5 * Copyright 2005-2007 Pierre Ossman 5 * Copyright 2005-2008 Pierre Ossman
6 * 6 *
7 * Use consistent with the GNU GPL is permitted, 7 * Use consistent with the GNU GPL is permitted,
8 * provided that this copyright notice is 8 * provided that this copyright notice is
@@ -237,17 +237,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
237 if (brq.data.blocks > card->host->max_blk_count) 237 if (brq.data.blocks > card->host->max_blk_count)
238 brq.data.blocks = card->host->max_blk_count; 238 brq.data.blocks = card->host->max_blk_count;
239 239
240 /*
241 * If the host doesn't support multiple block writes, force
242 * block writes to single block. SD cards are excepted from
243 * this rule as they support querying the number of
244 * successfully written sectors.
245 */
246 if (rq_data_dir(req) != READ &&
247 !(card->host->caps & MMC_CAP_MULTIWRITE) &&
248 !mmc_card_sd(card))
249 brq.data.blocks = 1;
250
251 if (brq.data.blocks > 1) { 240 if (brq.data.blocks > 1) {
252 /* SPI multiblock writes terminate using a special 241 /* SPI multiblock writes terminate using a special
253 * token, not a STOP_TRANSMISSION request. 242 * token, not a STOP_TRANSMISSION request.
@@ -296,22 +285,24 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
296 285
297 mmc_queue_bounce_post(mq); 286 mmc_queue_bounce_post(mq);
298 287
288 /*
289 * Check for errors here, but don't jump to cmd_err
290 * until later as we need to wait for the card to leave
291 * programming mode even when things go wrong.
292 */
299 if (brq.cmd.error) { 293 if (brq.cmd.error) {
300 printk(KERN_ERR "%s: error %d sending read/write command\n", 294 printk(KERN_ERR "%s: error %d sending read/write command\n",
301 req->rq_disk->disk_name, brq.cmd.error); 295 req->rq_disk->disk_name, brq.cmd.error);
302 goto cmd_err;
303 } 296 }
304 297
305 if (brq.data.error) { 298 if (brq.data.error) {
306 printk(KERN_ERR "%s: error %d transferring data\n", 299 printk(KERN_ERR "%s: error %d transferring data\n",
307 req->rq_disk->disk_name, brq.data.error); 300 req->rq_disk->disk_name, brq.data.error);
308 goto cmd_err;
309 } 301 }
310 302
311 if (brq.stop.error) { 303 if (brq.stop.error) {
312 printk(KERN_ERR "%s: error %d sending stop command\n", 304 printk(KERN_ERR "%s: error %d sending stop command\n",
313 req->rq_disk->disk_name, brq.stop.error); 305 req->rq_disk->disk_name, brq.stop.error);
314 goto cmd_err;
315 } 306 }
316 307
317 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { 308 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
@@ -344,6 +335,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
344#endif 335#endif
345 } 336 }
346 337
338 if (brq.cmd.error || brq.data.error || brq.stop.error)
339 goto cmd_err;
340
347 /* 341 /*
348 * A block was successfully transferred. 342 * A block was successfully transferred.
349 */ 343 */
@@ -362,30 +356,32 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
362 * mark the known good sectors as ok. 356 * mark the known good sectors as ok.
363 * 357 *
364 * If the card is not SD, we can still ok written sectors 358 * If the card is not SD, we can still ok written sectors
365 * if the controller can do proper error reporting. 359 * as reported by the controller (which might be less than
360 * the real number of written sectors, but never more).
366 * 361 *
367 * For reads we just fail the entire chunk as that should 362 * For reads we just fail the entire chunk as that should
368 * be safe in all cases. 363 * be safe in all cases.
369 */ 364 */
370 if (rq_data_dir(req) != READ && mmc_card_sd(card)) { 365 if (rq_data_dir(req) != READ) {
371 u32 blocks; 366 if (mmc_card_sd(card)) {
372 unsigned int bytes; 367 u32 blocks;
373 368 unsigned int bytes;
374 blocks = mmc_sd_num_wr_blocks(card); 369
375 if (blocks != (u32)-1) { 370 blocks = mmc_sd_num_wr_blocks(card);
376 if (card->csd.write_partial) 371 if (blocks != (u32)-1) {
377 bytes = blocks << md->block_bits; 372 if (card->csd.write_partial)
378 else 373 bytes = blocks << md->block_bits;
379 bytes = blocks << 9; 374 else
375 bytes = blocks << 9;
376 spin_lock_irq(&md->lock);
377 ret = __blk_end_request(req, 0, bytes);
378 spin_unlock_irq(&md->lock);
379 }
380 } else {
380 spin_lock_irq(&md->lock); 381 spin_lock_irq(&md->lock);
381 ret = __blk_end_request(req, 0, bytes); 382 ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
382 spin_unlock_irq(&md->lock); 383 spin_unlock_irq(&md->lock);
383 } 384 }
384 } else if (rq_data_dir(req) != READ &&
385 (card->host->caps & MMC_CAP_MULTIWRITE)) {
386 spin_lock_irq(&md->lock);
387 ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
388 spin_unlock_irq(&md->lock);
389 } 385 }
390 386
391 mmc_release_host(card->host); 387 mmc_release_host(card->host);
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index ffadee549a41..d6b9b486417c 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/drivers/mmc/card/mmc_test.c 2 * linux/drivers/mmc/card/mmc_test.c
3 * 3 *
4 * Copyright 2007 Pierre Ossman 4 * Copyright 2007-2008 Pierre Ossman
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -26,13 +26,17 @@
26struct mmc_test_card { 26struct mmc_test_card {
27 struct mmc_card *card; 27 struct mmc_card *card;
28 28
29 u8 scratch[BUFFER_SIZE];
29 u8 *buffer; 30 u8 *buffer;
30}; 31};
31 32
32/*******************************************************************/ 33/*******************************************************************/
33/* Helper functions */ 34/* General helper functions */
34/*******************************************************************/ 35/*******************************************************************/
35 36
37/*
38 * Configure correct block size in card
39 */
36static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size) 40static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
37{ 41{
38 struct mmc_command cmd; 42 struct mmc_command cmd;
@@ -48,117 +52,61 @@ static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
48 return 0; 52 return 0;
49} 53}
50 54
51static int __mmc_test_transfer(struct mmc_test_card *test, int write, 55/*
52 unsigned broken_xfer, u8 *buffer, unsigned addr, 56 * Fill in the mmc_request structure given a set of transfer parameters.
53 unsigned blocks, unsigned blksz) 57 */
58static void mmc_test_prepare_mrq(struct mmc_test_card *test,
59 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
60 unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
54{ 61{
55 int ret, busy; 62 BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
56
57 struct mmc_request mrq;
58 struct mmc_command cmd;
59 struct mmc_command stop;
60 struct mmc_data data;
61
62 struct scatterlist sg;
63
64 memset(&mrq, 0, sizeof(struct mmc_request));
65
66 mrq.cmd = &cmd;
67 mrq.data = &data;
68
69 memset(&cmd, 0, sizeof(struct mmc_command));
70 63
71 if (broken_xfer) { 64 if (blocks > 1) {
72 if (blocks > 1) { 65 mrq->cmd->opcode = write ?
73 cmd.opcode = write ? 66 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
74 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
75 } else {
76 cmd.opcode = MMC_SEND_STATUS;
77 }
78 } else { 67 } else {
79 if (blocks > 1) { 68 mrq->cmd->opcode = write ?
80 cmd.opcode = write ? 69 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
81 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
82 } else {
83 cmd.opcode = write ?
84 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
85 }
86 } 70 }
87 71
88 if (broken_xfer && blocks == 1) 72 mrq->cmd->arg = dev_addr;
89 cmd.arg = test->card->rca << 16; 73 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
90 else
91 cmd.arg = addr;
92 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
93 74
94 memset(&stop, 0, sizeof(struct mmc_command)); 75 if (blocks == 1)
95 76 mrq->stop = NULL;
96 if (!broken_xfer && (blocks > 1)) { 77 else {
97 stop.opcode = MMC_STOP_TRANSMISSION; 78 mrq->stop->opcode = MMC_STOP_TRANSMISSION;
98 stop.arg = 0; 79 mrq->stop->arg = 0;
99 stop.flags = MMC_RSP_R1B | MMC_CMD_AC; 80 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
100
101 mrq.stop = &stop;
102 } 81 }
103 82
104 memset(&data, 0, sizeof(struct mmc_data)); 83 mrq->data->blksz = blksz;
105 84 mrq->data->blocks = blocks;
106 data.blksz = blksz; 85 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
107 data.blocks = blocks; 86 mrq->data->sg = sg;
108 data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; 87 mrq->data->sg_len = sg_len;
109 data.sg = &sg;
110 data.sg_len = 1;
111
112 sg_init_one(&sg, buffer, blocks * blksz);
113
114 mmc_set_data_timeout(&data, test->card);
115 88
116 mmc_wait_for_req(test->card->host, &mrq); 89 mmc_set_data_timeout(mrq->data, test->card);
117 90}
118 ret = 0;
119
120 if (broken_xfer) {
121 if (!ret && cmd.error)
122 ret = cmd.error;
123 if (!ret && data.error == 0)
124 ret = RESULT_FAIL;
125 if (!ret && data.error != -ETIMEDOUT)
126 ret = data.error;
127 if (!ret && stop.error)
128 ret = stop.error;
129 if (blocks > 1) {
130 if (!ret && data.bytes_xfered > blksz)
131 ret = RESULT_FAIL;
132 } else {
133 if (!ret && data.bytes_xfered > 0)
134 ret = RESULT_FAIL;
135 }
136 } else {
137 if (!ret && cmd.error)
138 ret = cmd.error;
139 if (!ret && data.error)
140 ret = data.error;
141 if (!ret && stop.error)
142 ret = stop.error;
143 if (!ret && data.bytes_xfered != blocks * blksz)
144 ret = RESULT_FAIL;
145 }
146 91
147 if (ret == -EINVAL) 92/*
148 ret = RESULT_UNSUP_HOST; 93 * Wait for the card to finish the busy state
94 */
95static int mmc_test_wait_busy(struct mmc_test_card *test)
96{
97 int ret, busy;
98 struct mmc_command cmd;
149 99
150 busy = 0; 100 busy = 0;
151 do { 101 do {
152 int ret2;
153
154 memset(&cmd, 0, sizeof(struct mmc_command)); 102 memset(&cmd, 0, sizeof(struct mmc_command));
155 103
156 cmd.opcode = MMC_SEND_STATUS; 104 cmd.opcode = MMC_SEND_STATUS;
157 cmd.arg = test->card->rca << 16; 105 cmd.arg = test->card->rca << 16;
158 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 106 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
159 107
160 ret2 = mmc_wait_for_cmd(test->card->host, &cmd, 0); 108 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
161 if (ret2) 109 if (ret)
162 break; 110 break;
163 111
164 if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) { 112 if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) {
@@ -172,14 +120,57 @@ static int __mmc_test_transfer(struct mmc_test_card *test, int write,
172 return ret; 120 return ret;
173} 121}
174 122
175static int mmc_test_transfer(struct mmc_test_card *test, int write, 123/*
176 u8 *buffer, unsigned addr, unsigned blocks, unsigned blksz) 124 * Transfer a single sector of kernel addressable data
125 */
126static int mmc_test_buffer_transfer(struct mmc_test_card *test,
127 u8 *buffer, unsigned addr, unsigned blksz, int write)
177{ 128{
178 return __mmc_test_transfer(test, write, 0, buffer, 129 int ret;
179 addr, blocks, blksz); 130
131 struct mmc_request mrq;
132 struct mmc_command cmd;
133 struct mmc_command stop;
134 struct mmc_data data;
135
136 struct scatterlist sg;
137
138 memset(&mrq, 0, sizeof(struct mmc_request));
139 memset(&cmd, 0, sizeof(struct mmc_command));
140 memset(&data, 0, sizeof(struct mmc_data));
141 memset(&stop, 0, sizeof(struct mmc_command));
142
143 mrq.cmd = &cmd;
144 mrq.data = &data;
145 mrq.stop = &stop;
146
147 sg_init_one(&sg, buffer, blksz);
148
149 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
150
151 mmc_wait_for_req(test->card->host, &mrq);
152
153 if (cmd.error)
154 return cmd.error;
155 if (data.error)
156 return data.error;
157
158 ret = mmc_test_wait_busy(test);
159 if (ret)
160 return ret;
161
162 return 0;
180} 163}
181 164
182static int mmc_test_prepare_verify(struct mmc_test_card *test, int write) 165/*******************************************************************/
166/* Test preparation and cleanup */
167/*******************************************************************/
168
169/*
170 * Fill the first couple of sectors of the card with known data
171 * so that bad reads/writes can be detected
172 */
173static int __mmc_test_prepare(struct mmc_test_card *test, int write)
183{ 174{
184 int ret, i; 175 int ret, i;
185 176
@@ -188,15 +179,14 @@ static int mmc_test_prepare_verify(struct mmc_test_card *test, int write)
188 return ret; 179 return ret;
189 180
190 if (write) 181 if (write)
191 memset(test->buffer, 0xDF, BUFFER_SIZE); 182 memset(test->buffer, 0xDF, 512);
192 else { 183 else {
193 for (i = 0;i < BUFFER_SIZE;i++) 184 for (i = 0;i < 512;i++)
194 test->buffer[i] = i; 185 test->buffer[i] = i;
195 } 186 }
196 187
197 for (i = 0;i < BUFFER_SIZE / 512;i++) { 188 for (i = 0;i < BUFFER_SIZE / 512;i++) {
198 ret = mmc_test_transfer(test, 1, test->buffer + i * 512, 189 ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1);
199 i * 512, 1, 512);
200 if (ret) 190 if (ret)
201 return ret; 191 return ret;
202 } 192 }
@@ -204,41 +194,218 @@ static int mmc_test_prepare_verify(struct mmc_test_card *test, int write)
204 return 0; 194 return 0;
205} 195}
206 196
207static int mmc_test_prepare_verify_write(struct mmc_test_card *test) 197static int mmc_test_prepare_write(struct mmc_test_card *test)
198{
199 return __mmc_test_prepare(test, 1);
200}
201
202static int mmc_test_prepare_read(struct mmc_test_card *test)
203{
204 return __mmc_test_prepare(test, 0);
205}
206
207static int mmc_test_cleanup(struct mmc_test_card *test)
208{ 208{
209 return mmc_test_prepare_verify(test, 1); 209 int ret, i;
210
211 ret = mmc_test_set_blksize(test, 512);
212 if (ret)
213 return ret;
214
215 memset(test->buffer, 0, 512);
216
217 for (i = 0;i < BUFFER_SIZE / 512;i++) {
218 ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1);
219 if (ret)
220 return ret;
221 }
222
223 return 0;
210} 224}
211 225
212static int mmc_test_prepare_verify_read(struct mmc_test_card *test) 226/*******************************************************************/
227/* Test execution helpers */
228/*******************************************************************/
229
230/*
231 * Modifies the mmc_request to perform the "short transfer" tests
232 */
233static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
234 struct mmc_request *mrq, int write)
213{ 235{
214 return mmc_test_prepare_verify(test, 0); 236 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
237
238 if (mrq->data->blocks > 1) {
239 mrq->cmd->opcode = write ?
240 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
241 mrq->stop = NULL;
242 } else {
243 mrq->cmd->opcode = MMC_SEND_STATUS;
244 mrq->cmd->arg = test->card->rca << 16;
245 }
215} 246}
216 247
217static int mmc_test_verified_transfer(struct mmc_test_card *test, int write, 248/*
218 u8 *buffer, unsigned addr, unsigned blocks, unsigned blksz) 249 * Checks that a normal transfer didn't have any errors
250 */
251static int mmc_test_check_result(struct mmc_test_card *test,
252 struct mmc_request *mrq)
219{ 253{
220 int ret, i, sectors; 254 int ret;
221 255
222 /* 256 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
223 * It is assumed that the above preparation has been done. 257
224 */ 258 ret = 0;
225 259
226 memset(test->buffer, 0, BUFFER_SIZE); 260 if (!ret && mrq->cmd->error)
261 ret = mrq->cmd->error;
262 if (!ret && mrq->data->error)
263 ret = mrq->data->error;
264 if (!ret && mrq->stop && mrq->stop->error)
265 ret = mrq->stop->error;
266 if (!ret && mrq->data->bytes_xfered !=
267 mrq->data->blocks * mrq->data->blksz)
268 ret = RESULT_FAIL;
269
270 if (ret == -EINVAL)
271 ret = RESULT_UNSUP_HOST;
272
273 return ret;
274}
275
276/*
277 * Checks that a "short transfer" behaved as expected
278 */
279static int mmc_test_check_broken_result(struct mmc_test_card *test,
280 struct mmc_request *mrq)
281{
282 int ret;
283
284 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
285
286 ret = 0;
287
288 if (!ret && mrq->cmd->error)
289 ret = mrq->cmd->error;
290 if (!ret && mrq->data->error == 0)
291 ret = RESULT_FAIL;
292 if (!ret && mrq->data->error != -ETIMEDOUT)
293 ret = mrq->data->error;
294 if (!ret && mrq->stop && mrq->stop->error)
295 ret = mrq->stop->error;
296 if (mrq->data->blocks > 1) {
297 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
298 ret = RESULT_FAIL;
299 } else {
300 if (!ret && mrq->data->bytes_xfered > 0)
301 ret = RESULT_FAIL;
302 }
303
304 if (ret == -EINVAL)
305 ret = RESULT_UNSUP_HOST;
306
307 return ret;
308}
309
310/*
311 * Tests a basic transfer with certain parameters
312 */
313static int mmc_test_simple_transfer(struct mmc_test_card *test,
314 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
315 unsigned blocks, unsigned blksz, int write)
316{
317 struct mmc_request mrq;
318 struct mmc_command cmd;
319 struct mmc_command stop;
320 struct mmc_data data;
321
322 memset(&mrq, 0, sizeof(struct mmc_request));
323 memset(&cmd, 0, sizeof(struct mmc_command));
324 memset(&data, 0, sizeof(struct mmc_data));
325 memset(&stop, 0, sizeof(struct mmc_command));
326
327 mrq.cmd = &cmd;
328 mrq.data = &data;
329 mrq.stop = &stop;
330
331 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
332 blocks, blksz, write);
333
334 mmc_wait_for_req(test->card->host, &mrq);
335
336 mmc_test_wait_busy(test);
337
338 return mmc_test_check_result(test, &mrq);
339}
340
341/*
342 * Tests a transfer where the card will fail completely or partly
343 */
344static int mmc_test_broken_transfer(struct mmc_test_card *test,
345 unsigned blocks, unsigned blksz, int write)
346{
347 struct mmc_request mrq;
348 struct mmc_command cmd;
349 struct mmc_command stop;
350 struct mmc_data data;
351
352 struct scatterlist sg;
353
354 memset(&mrq, 0, sizeof(struct mmc_request));
355 memset(&cmd, 0, sizeof(struct mmc_command));
356 memset(&data, 0, sizeof(struct mmc_data));
357 memset(&stop, 0, sizeof(struct mmc_command));
358
359 mrq.cmd = &cmd;
360 mrq.data = &data;
361 mrq.stop = &stop;
362
363 sg_init_one(&sg, test->buffer, blocks * blksz);
364
365 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
366 mmc_test_prepare_broken_mrq(test, &mrq, write);
367
368 mmc_wait_for_req(test->card->host, &mrq);
369
370 mmc_test_wait_busy(test);
371
372 return mmc_test_check_broken_result(test, &mrq);
373}
374
375/*
376 * Does a complete transfer test where data is also validated
377 *
378 * Note: mmc_test_prepare() must have been done before this call
379 */
380static int mmc_test_transfer(struct mmc_test_card *test,
381 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
382 unsigned blocks, unsigned blksz, int write)
383{
384 int ret, i;
385 unsigned long flags;
227 386
228 if (write) { 387 if (write) {
229 for (i = 0;i < blocks * blksz;i++) 388 for (i = 0;i < blocks * blksz;i++)
230 buffer[i] = i; 389 test->scratch[i] = i;
390 } else {
391 memset(test->scratch, 0, BUFFER_SIZE);
231 } 392 }
393 local_irq_save(flags);
394 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
395 local_irq_restore(flags);
232 396
233 ret = mmc_test_set_blksize(test, blksz); 397 ret = mmc_test_set_blksize(test, blksz);
234 if (ret) 398 if (ret)
235 return ret; 399 return ret;
236 400
237 ret = mmc_test_transfer(test, write, buffer, addr, blocks, blksz); 401 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
402 blocks, blksz, write);
238 if (ret) 403 if (ret)
239 return ret; 404 return ret;
240 405
241 if (write) { 406 if (write) {
407 int sectors;
408
242 ret = mmc_test_set_blksize(test, 512); 409 ret = mmc_test_set_blksize(test, 512);
243 if (ret) 410 if (ret)
244 return ret; 411 return ret;
@@ -253,9 +420,9 @@ static int mmc_test_verified_transfer(struct mmc_test_card *test, int write,
253 memset(test->buffer, 0, sectors * 512); 420 memset(test->buffer, 0, sectors * 512);
254 421
255 for (i = 0;i < sectors;i++) { 422 for (i = 0;i < sectors;i++) {
256 ret = mmc_test_transfer(test, 0, 423 ret = mmc_test_buffer_transfer(test,
257 test->buffer + i * 512, 424 test->buffer + i * 512,
258 addr + i * 512, 1, 512); 425 dev_addr + i * 512, 512, 0);
259 if (ret) 426 if (ret)
260 return ret; 427 return ret;
261 } 428 }
@@ -270,8 +437,11 @@ static int mmc_test_verified_transfer(struct mmc_test_card *test, int write,
270 return RESULT_FAIL; 437 return RESULT_FAIL;
271 } 438 }
272 } else { 439 } else {
440 local_irq_save(flags);
441 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
442 local_irq_restore(flags);
273 for (i = 0;i < blocks * blksz;i++) { 443 for (i = 0;i < blocks * blksz;i++) {
274 if (buffer[i] != (u8)i) 444 if (test->scratch[i] != (u8)i)
275 return RESULT_FAIL; 445 return RESULT_FAIL;
276 } 446 }
277 } 447 }
@@ -279,26 +449,6 @@ static int mmc_test_verified_transfer(struct mmc_test_card *test, int write,
279 return 0; 449 return 0;
280} 450}
281 451
282static int mmc_test_cleanup_verify(struct mmc_test_card *test)
283{
284 int ret, i;
285
286 ret = mmc_test_set_blksize(test, 512);
287 if (ret)
288 return ret;
289
290 memset(test->buffer, 0, BUFFER_SIZE);
291
292 for (i = 0;i < BUFFER_SIZE / 512;i++) {
293 ret = mmc_test_transfer(test, 1, test->buffer + i * 512,
294 i * 512, 1, 512);
295 if (ret)
296 return ret;
297 }
298
299 return 0;
300}
301
302/*******************************************************************/ 452/*******************************************************************/
303/* Tests */ 453/* Tests */
304/*******************************************************************/ 454/*******************************************************************/
@@ -314,12 +464,15 @@ struct mmc_test_case {
314static int mmc_test_basic_write(struct mmc_test_card *test) 464static int mmc_test_basic_write(struct mmc_test_card *test)
315{ 465{
316 int ret; 466 int ret;
467 struct scatterlist sg;
317 468
318 ret = mmc_test_set_blksize(test, 512); 469 ret = mmc_test_set_blksize(test, 512);
319 if (ret) 470 if (ret)
320 return ret; 471 return ret;
321 472
322 ret = mmc_test_transfer(test, 1, test->buffer, 0, 1, 512); 473 sg_init_one(&sg, test->buffer, 512);
474
475 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
323 if (ret) 476 if (ret)
324 return ret; 477 return ret;
325 478
@@ -329,12 +482,15 @@ static int mmc_test_basic_write(struct mmc_test_card *test)
329static int mmc_test_basic_read(struct mmc_test_card *test) 482static int mmc_test_basic_read(struct mmc_test_card *test)
330{ 483{
331 int ret; 484 int ret;
485 struct scatterlist sg;
332 486
333 ret = mmc_test_set_blksize(test, 512); 487 ret = mmc_test_set_blksize(test, 512);
334 if (ret) 488 if (ret)
335 return ret; 489 return ret;
336 490
337 ret = mmc_test_transfer(test, 0, test->buffer, 0, 1, 512); 491 sg_init_one(&sg, test->buffer, 512);
492
493 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
338 if (ret) 494 if (ret)
339 return ret; 495 return ret;
340 496
@@ -344,8 +500,11 @@ static int mmc_test_basic_read(struct mmc_test_card *test)
344static int mmc_test_verify_write(struct mmc_test_card *test) 500static int mmc_test_verify_write(struct mmc_test_card *test)
345{ 501{
346 int ret; 502 int ret;
503 struct scatterlist sg;
504
505 sg_init_one(&sg, test->buffer, 512);
347 506
348 ret = mmc_test_verified_transfer(test, 1, test->buffer, 0, 1, 512); 507 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
349 if (ret) 508 if (ret)
350 return ret; 509 return ret;
351 510
@@ -355,8 +514,11 @@ static int mmc_test_verify_write(struct mmc_test_card *test)
355static int mmc_test_verify_read(struct mmc_test_card *test) 514static int mmc_test_verify_read(struct mmc_test_card *test)
356{ 515{
357 int ret; 516 int ret;
517 struct scatterlist sg;
518
519 sg_init_one(&sg, test->buffer, 512);
358 520
359 ret = mmc_test_verified_transfer(test, 0, test->buffer, 0, 1, 512); 521 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
360 if (ret) 522 if (ret)
361 return ret; 523 return ret;
362 524
@@ -367,6 +529,7 @@ static int mmc_test_multi_write(struct mmc_test_card *test)
367{ 529{
368 int ret; 530 int ret;
369 unsigned int size; 531 unsigned int size;
532 struct scatterlist sg;
370 533
371 if (test->card->host->max_blk_count == 1) 534 if (test->card->host->max_blk_count == 1)
372 return RESULT_UNSUP_HOST; 535 return RESULT_UNSUP_HOST;
@@ -379,8 +542,9 @@ static int mmc_test_multi_write(struct mmc_test_card *test)
379 if (size < 1024) 542 if (size < 1024)
380 return RESULT_UNSUP_HOST; 543 return RESULT_UNSUP_HOST;
381 544
382 ret = mmc_test_verified_transfer(test, 1, test->buffer, 0, 545 sg_init_one(&sg, test->buffer, size);
383 size / 512, 512); 546
547 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
384 if (ret) 548 if (ret)
385 return ret; 549 return ret;
386 550
@@ -391,6 +555,7 @@ static int mmc_test_multi_read(struct mmc_test_card *test)
391{ 555{
392 int ret; 556 int ret;
393 unsigned int size; 557 unsigned int size;
558 struct scatterlist sg;
394 559
395 if (test->card->host->max_blk_count == 1) 560 if (test->card->host->max_blk_count == 1)
396 return RESULT_UNSUP_HOST; 561 return RESULT_UNSUP_HOST;
@@ -403,8 +568,9 @@ static int mmc_test_multi_read(struct mmc_test_card *test)
403 if (size < 1024) 568 if (size < 1024)
404 return RESULT_UNSUP_HOST; 569 return RESULT_UNSUP_HOST;
405 570
406 ret = mmc_test_verified_transfer(test, 0, test->buffer, 0, 571 sg_init_one(&sg, test->buffer, size);
407 size / 512, 512); 572
573 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
408 if (ret) 574 if (ret)
409 return ret; 575 return ret;
410 576
@@ -414,13 +580,14 @@ static int mmc_test_multi_read(struct mmc_test_card *test)
414static int mmc_test_pow2_write(struct mmc_test_card *test) 580static int mmc_test_pow2_write(struct mmc_test_card *test)
415{ 581{
416 int ret, i; 582 int ret, i;
583 struct scatterlist sg;
417 584
418 if (!test->card->csd.write_partial) 585 if (!test->card->csd.write_partial)
419 return RESULT_UNSUP_CARD; 586 return RESULT_UNSUP_CARD;
420 587
421 for (i = 1; i < 512;i <<= 1) { 588 for (i = 1; i < 512;i <<= 1) {
422 ret = mmc_test_verified_transfer(test, 1, 589 sg_init_one(&sg, test->buffer, i);
423 test->buffer, 0, 1, i); 590 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
424 if (ret) 591 if (ret)
425 return ret; 592 return ret;
426 } 593 }
@@ -431,13 +598,14 @@ static int mmc_test_pow2_write(struct mmc_test_card *test)
431static int mmc_test_pow2_read(struct mmc_test_card *test) 598static int mmc_test_pow2_read(struct mmc_test_card *test)
432{ 599{
433 int ret, i; 600 int ret, i;
601 struct scatterlist sg;
434 602
435 if (!test->card->csd.read_partial) 603 if (!test->card->csd.read_partial)
436 return RESULT_UNSUP_CARD; 604 return RESULT_UNSUP_CARD;
437 605
438 for (i = 1; i < 512;i <<= 1) { 606 for (i = 1; i < 512;i <<= 1) {
439 ret = mmc_test_verified_transfer(test, 0, 607 sg_init_one(&sg, test->buffer, i);
440 test->buffer, 0, 1, i); 608 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
441 if (ret) 609 if (ret)
442 return ret; 610 return ret;
443 } 611 }
@@ -448,13 +616,14 @@ static int mmc_test_pow2_read(struct mmc_test_card *test)
448static int mmc_test_weird_write(struct mmc_test_card *test) 616static int mmc_test_weird_write(struct mmc_test_card *test)
449{ 617{
450 int ret, i; 618 int ret, i;
619 struct scatterlist sg;
451 620
452 if (!test->card->csd.write_partial) 621 if (!test->card->csd.write_partial)
453 return RESULT_UNSUP_CARD; 622 return RESULT_UNSUP_CARD;
454 623
455 for (i = 3; i < 512;i += 7) { 624 for (i = 3; i < 512;i += 7) {
456 ret = mmc_test_verified_transfer(test, 1, 625 sg_init_one(&sg, test->buffer, i);
457 test->buffer, 0, 1, i); 626 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
458 if (ret) 627 if (ret)
459 return ret; 628 return ret;
460 } 629 }
@@ -465,13 +634,14 @@ static int mmc_test_weird_write(struct mmc_test_card *test)
465static int mmc_test_weird_read(struct mmc_test_card *test) 634static int mmc_test_weird_read(struct mmc_test_card *test)
466{ 635{
467 int ret, i; 636 int ret, i;
637 struct scatterlist sg;
468 638
469 if (!test->card->csd.read_partial) 639 if (!test->card->csd.read_partial)
470 return RESULT_UNSUP_CARD; 640 return RESULT_UNSUP_CARD;
471 641
472 for (i = 3; i < 512;i += 7) { 642 for (i = 3; i < 512;i += 7) {
473 ret = mmc_test_verified_transfer(test, 0, 643 sg_init_one(&sg, test->buffer, i);
474 test->buffer, 0, 1, i); 644 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
475 if (ret) 645 if (ret)
476 return ret; 646 return ret;
477 } 647 }
@@ -482,10 +652,11 @@ static int mmc_test_weird_read(struct mmc_test_card *test)
482static int mmc_test_align_write(struct mmc_test_card *test) 652static int mmc_test_align_write(struct mmc_test_card *test)
483{ 653{
484 int ret, i; 654 int ret, i;
655 struct scatterlist sg;
485 656
486 for (i = 1;i < 4;i++) { 657 for (i = 1;i < 4;i++) {
487 ret = mmc_test_verified_transfer(test, 1, test->buffer + i, 658 sg_init_one(&sg, test->buffer + i, 512);
488 0, 1, 512); 659 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
489 if (ret) 660 if (ret)
490 return ret; 661 return ret;
491 } 662 }
@@ -496,10 +667,11 @@ static int mmc_test_align_write(struct mmc_test_card *test)
496static int mmc_test_align_read(struct mmc_test_card *test) 667static int mmc_test_align_read(struct mmc_test_card *test)
497{ 668{
498 int ret, i; 669 int ret, i;
670 struct scatterlist sg;
499 671
500 for (i = 1;i < 4;i++) { 672 for (i = 1;i < 4;i++) {
501 ret = mmc_test_verified_transfer(test, 0, test->buffer + i, 673 sg_init_one(&sg, test->buffer + i, 512);
502 0, 1, 512); 674 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
503 if (ret) 675 if (ret)
504 return ret; 676 return ret;
505 } 677 }
@@ -511,6 +683,7 @@ static int mmc_test_align_multi_write(struct mmc_test_card *test)
511{ 683{
512 int ret, i; 684 int ret, i;
513 unsigned int size; 685 unsigned int size;
686 struct scatterlist sg;
514 687
515 if (test->card->host->max_blk_count == 1) 688 if (test->card->host->max_blk_count == 1)
516 return RESULT_UNSUP_HOST; 689 return RESULT_UNSUP_HOST;
@@ -524,8 +697,8 @@ static int mmc_test_align_multi_write(struct mmc_test_card *test)
524 return RESULT_UNSUP_HOST; 697 return RESULT_UNSUP_HOST;
525 698
526 for (i = 1;i < 4;i++) { 699 for (i = 1;i < 4;i++) {
527 ret = mmc_test_verified_transfer(test, 1, test->buffer + i, 700 sg_init_one(&sg, test->buffer + i, size);
528 0, size / 512, 512); 701 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
529 if (ret) 702 if (ret)
530 return ret; 703 return ret;
531 } 704 }
@@ -537,6 +710,7 @@ static int mmc_test_align_multi_read(struct mmc_test_card *test)
537{ 710{
538 int ret, i; 711 int ret, i;
539 unsigned int size; 712 unsigned int size;
713 struct scatterlist sg;
540 714
541 if (test->card->host->max_blk_count == 1) 715 if (test->card->host->max_blk_count == 1)
542 return RESULT_UNSUP_HOST; 716 return RESULT_UNSUP_HOST;
@@ -550,8 +724,8 @@ static int mmc_test_align_multi_read(struct mmc_test_card *test)
550 return RESULT_UNSUP_HOST; 724 return RESULT_UNSUP_HOST;
551 725
552 for (i = 1;i < 4;i++) { 726 for (i = 1;i < 4;i++) {
553 ret = mmc_test_verified_transfer(test, 0, test->buffer + i, 727 sg_init_one(&sg, test->buffer + i, size);
554 0, size / 512, 512); 728 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
555 if (ret) 729 if (ret)
556 return ret; 730 return ret;
557 } 731 }
@@ -567,7 +741,7 @@ static int mmc_test_xfersize_write(struct mmc_test_card *test)
567 if (ret) 741 if (ret)
568 return ret; 742 return ret;
569 743
570 ret = __mmc_test_transfer(test, 1, 1, test->buffer, 0, 1, 512); 744 ret = mmc_test_broken_transfer(test, 1, 512, 1);
571 if (ret) 745 if (ret)
572 return ret; 746 return ret;
573 747
@@ -582,7 +756,7 @@ static int mmc_test_xfersize_read(struct mmc_test_card *test)
582 if (ret) 756 if (ret)
583 return ret; 757 return ret;
584 758
585 ret = __mmc_test_transfer(test, 0, 1, test->buffer, 0, 1, 512); 759 ret = mmc_test_broken_transfer(test, 1, 512, 0);
586 if (ret) 760 if (ret)
587 return ret; 761 return ret;
588 762
@@ -600,7 +774,7 @@ static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
600 if (ret) 774 if (ret)
601 return ret; 775 return ret;
602 776
603 ret = __mmc_test_transfer(test, 1, 1, test->buffer, 0, 2, 512); 777 ret = mmc_test_broken_transfer(test, 2, 512, 1);
604 if (ret) 778 if (ret)
605 return ret; 779 return ret;
606 780
@@ -618,7 +792,7 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
618 if (ret) 792 if (ret)
619 return ret; 793 return ret;
620 794
621 ret = __mmc_test_transfer(test, 0, 1, test->buffer, 0, 2, 512); 795 ret = mmc_test_broken_transfer(test, 2, 512, 0);
622 if (ret) 796 if (ret)
623 return ret; 797 return ret;
624 798
@@ -638,86 +812,86 @@ static const struct mmc_test_case mmc_test_cases[] = {
638 812
639 { 813 {
640 .name = "Basic write (with data verification)", 814 .name = "Basic write (with data verification)",
641 .prepare = mmc_test_prepare_verify_write, 815 .prepare = mmc_test_prepare_write,
642 .run = mmc_test_verify_write, 816 .run = mmc_test_verify_write,
643 .cleanup = mmc_test_cleanup_verify, 817 .cleanup = mmc_test_cleanup,
644 }, 818 },
645 819
646 { 820 {
647 .name = "Basic read (with data verification)", 821 .name = "Basic read (with data verification)",
648 .prepare = mmc_test_prepare_verify_read, 822 .prepare = mmc_test_prepare_read,
649 .run = mmc_test_verify_read, 823 .run = mmc_test_verify_read,
650 .cleanup = mmc_test_cleanup_verify, 824 .cleanup = mmc_test_cleanup,
651 }, 825 },
652 826
653 { 827 {
654 .name = "Multi-block write", 828 .name = "Multi-block write",
655 .prepare = mmc_test_prepare_verify_write, 829 .prepare = mmc_test_prepare_write,
656 .run = mmc_test_multi_write, 830 .run = mmc_test_multi_write,
657 .cleanup = mmc_test_cleanup_verify, 831 .cleanup = mmc_test_cleanup,
658 }, 832 },
659 833
660 { 834 {
661 .name = "Multi-block read", 835 .name = "Multi-block read",
662 .prepare = mmc_test_prepare_verify_read, 836 .prepare = mmc_test_prepare_read,
663 .run = mmc_test_multi_read, 837 .run = mmc_test_multi_read,
664 .cleanup = mmc_test_cleanup_verify, 838 .cleanup = mmc_test_cleanup,
665 }, 839 },
666 840
667 { 841 {
668 .name = "Power of two block writes", 842 .name = "Power of two block writes",
669 .prepare = mmc_test_prepare_verify_write, 843 .prepare = mmc_test_prepare_write,
670 .run = mmc_test_pow2_write, 844 .run = mmc_test_pow2_write,
671 .cleanup = mmc_test_cleanup_verify, 845 .cleanup = mmc_test_cleanup,
672 }, 846 },
673 847
674 { 848 {
675 .name = "Power of two block reads", 849 .name = "Power of two block reads",
676 .prepare = mmc_test_prepare_verify_read, 850 .prepare = mmc_test_prepare_read,
677 .run = mmc_test_pow2_read, 851 .run = mmc_test_pow2_read,
678 .cleanup = mmc_test_cleanup_verify, 852 .cleanup = mmc_test_cleanup,
679 }, 853 },
680 854
681 { 855 {
682 .name = "Weird sized block writes", 856 .name = "Weird sized block writes",
683 .prepare = mmc_test_prepare_verify_write, 857 .prepare = mmc_test_prepare_write,
684 .run = mmc_test_weird_write, 858 .run = mmc_test_weird_write,
685 .cleanup = mmc_test_cleanup_verify, 859 .cleanup = mmc_test_cleanup,
686 }, 860 },
687 861
688 { 862 {
689 .name = "Weird sized block reads", 863 .name = "Weird sized block reads",
690 .prepare = mmc_test_prepare_verify_read, 864 .prepare = mmc_test_prepare_read,
691 .run = mmc_test_weird_read, 865 .run = mmc_test_weird_read,
692 .cleanup = mmc_test_cleanup_verify, 866 .cleanup = mmc_test_cleanup,
693 }, 867 },
694 868
695 { 869 {
696 .name = "Badly aligned write", 870 .name = "Badly aligned write",
697 .prepare = mmc_test_prepare_verify_write, 871 .prepare = mmc_test_prepare_write,
698 .run = mmc_test_align_write, 872 .run = mmc_test_align_write,
699 .cleanup = mmc_test_cleanup_verify, 873 .cleanup = mmc_test_cleanup,
700 }, 874 },
701 875
702 { 876 {
703 .name = "Badly aligned read", 877 .name = "Badly aligned read",
704 .prepare = mmc_test_prepare_verify_read, 878 .prepare = mmc_test_prepare_read,
705 .run = mmc_test_align_read, 879 .run = mmc_test_align_read,
706 .cleanup = mmc_test_cleanup_verify, 880 .cleanup = mmc_test_cleanup,
707 }, 881 },
708 882
709 { 883 {
710 .name = "Badly aligned multi-block write", 884 .name = "Badly aligned multi-block write",
711 .prepare = mmc_test_prepare_verify_write, 885 .prepare = mmc_test_prepare_write,
712 .run = mmc_test_align_multi_write, 886 .run = mmc_test_align_multi_write,
713 .cleanup = mmc_test_cleanup_verify, 887 .cleanup = mmc_test_cleanup,
714 }, 888 },
715 889
716 { 890 {
717 .name = "Badly aligned multi-block read", 891 .name = "Badly aligned multi-block read",
718 .prepare = mmc_test_prepare_verify_read, 892 .prepare = mmc_test_prepare_read,
719 .run = mmc_test_align_multi_read, 893 .run = mmc_test_align_multi_read,
720 .cleanup = mmc_test_cleanup_verify, 894 .cleanup = mmc_test_cleanup,
721 }, 895 },
722 896
723 { 897 {
@@ -743,7 +917,7 @@ static const struct mmc_test_case mmc_test_cases[] = {
743 917
744static struct mutex mmc_test_lock; 918static struct mutex mmc_test_lock;
745 919
746static void mmc_test_run(struct mmc_test_card *test) 920static void mmc_test_run(struct mmc_test_card *test, int testcase)
747{ 921{
748 int i, ret; 922 int i, ret;
749 923
@@ -753,6 +927,9 @@ static void mmc_test_run(struct mmc_test_card *test)
753 mmc_claim_host(test->card->host); 927 mmc_claim_host(test->card->host);
754 928
755 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) { 929 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
930 if (testcase && ((i + 1) != testcase))
931 continue;
932
756 printk(KERN_INFO "%s: Test case %d. %s...\n", 933 printk(KERN_INFO "%s: Test case %d. %s...\n",
757 mmc_hostname(test->card->host), i + 1, 934 mmc_hostname(test->card->host), i + 1,
758 mmc_test_cases[i].name); 935 mmc_test_cases[i].name);
@@ -824,9 +1001,12 @@ static ssize_t mmc_test_store(struct device *dev,
824{ 1001{
825 struct mmc_card *card; 1002 struct mmc_card *card;
826 struct mmc_test_card *test; 1003 struct mmc_test_card *test;
1004 int testcase;
827 1005
828 card = container_of(dev, struct mmc_card, dev); 1006 card = container_of(dev, struct mmc_card, dev);
829 1007
1008 testcase = simple_strtol(buf, NULL, 10);
1009
830 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL); 1010 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
831 if (!test) 1011 if (!test)
832 return -ENOMEM; 1012 return -ENOMEM;
@@ -836,7 +1016,7 @@ static ssize_t mmc_test_store(struct device *dev,
836 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); 1016 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
837 if (test->buffer) { 1017 if (test->buffer) {
838 mutex_lock(&mmc_test_lock); 1018 mutex_lock(&mmc_test_lock);
839 mmc_test_run(test); 1019 mmc_test_run(test, testcase);
840 mutex_unlock(&mmc_test_lock); 1020 mutex_unlock(&mmc_test_lock);
841 } 1021 }
842 1022
@@ -852,6 +1032,9 @@ static int mmc_test_probe(struct mmc_card *card)
852{ 1032{
853 int ret; 1033 int ret;
854 1034
1035 if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD))
1036 return -ENODEV;
1037
855 mutex_init(&mmc_test_lock); 1038 mutex_init(&mmc_test_lock);
856 1039
857 ret = device_create_file(&card->dev, &dev_attr_test); 1040 ret = device_create_file(&card->dev, &dev_attr_test);
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index eeea84c309e6..78ad48718ab0 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -885,12 +885,14 @@ static void sdio_uart_set_termios(struct tty_struct *tty, struct ktermios *old_t
885 sdio_uart_release_func(port); 885 sdio_uart_release_func(port);
886} 886}
887 887
888static void sdio_uart_break_ctl(struct tty_struct *tty, int break_state) 888static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state)
889{ 889{
890 struct sdio_uart_port *port = tty->driver_data; 890 struct sdio_uart_port *port = tty->driver_data;
891 int result;
891 892
892 if (sdio_uart_claim_func(port) != 0) 893 result = sdio_uart_claim_func(port);
893 return; 894 if (result != 0)
895 return result;
894 896
895 if (break_state == -1) 897 if (break_state == -1)
896 port->lcr |= UART_LCR_SBC; 898 port->lcr |= UART_LCR_SBC;
@@ -899,6 +901,7 @@ static void sdio_uart_break_ctl(struct tty_struct *tty, int break_state)
899 sdio_out(port, UART_LCR, port->lcr); 901 sdio_out(port, UART_LCR, port->lcr);
900 902
901 sdio_uart_release_func(port); 903 sdio_uart_release_func(port);
904 return 0;
902} 905}
903 906
904static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file) 907static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 01ced4c5a61d..3ee5b8c3b5ce 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved. 4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. 5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. 6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. 7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
@@ -295,6 +295,33 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
295EXPORT_SYMBOL(mmc_set_data_timeout); 295EXPORT_SYMBOL(mmc_set_data_timeout);
296 296
297/** 297/**
298 * mmc_align_data_size - pads a transfer size to a more optimal value
299 * @card: the MMC card associated with the data transfer
300 * @sz: original transfer size
301 *
302 * Pads the original data size with a number of extra bytes in
303 * order to avoid controller bugs and/or performance hits
304 * (e.g. some controllers revert to PIO for certain sizes).
305 *
306 * Returns the improved size, which might be unmodified.
307 *
308 * Note that this function is only relevant when issuing a
309 * single scatter gather entry.
310 */
311unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
312{
313 /*
314 * FIXME: We don't have a system for the controller to tell
315 * the core about its problems yet, so for now we just 32-bit
316 * align the size.
317 */
318 sz = ((sz + 3) / 4) * 4;
319
320 return sz;
321}
322EXPORT_SYMBOL(mmc_align_data_size);
323
324/**
298 * __mmc_claim_host - exclusively claim a host 325 * __mmc_claim_host - exclusively claim a host
299 * @host: mmc host to claim 326 * @host: mmc host to claim
300 * @abort: whether or not the operation should be aborted 327 * @abort: whether or not the operation should be aborted
@@ -638,6 +665,9 @@ void mmc_rescan(struct work_struct *work)
638 */ 665 */
639 mmc_bus_put(host); 666 mmc_bus_put(host);
640 667
668 if (host->ops->get_cd && host->ops->get_cd(host) == 0)
669 goto out;
670
641 mmc_claim_host(host); 671 mmc_claim_host(host);
642 672
643 mmc_power_up(host); 673 mmc_power_up(host);
@@ -652,7 +682,7 @@ void mmc_rescan(struct work_struct *work)
652 if (!err) { 682 if (!err) {
653 if (mmc_attach_sdio(host, ocr)) 683 if (mmc_attach_sdio(host, ocr))
654 mmc_power_off(host); 684 mmc_power_off(host);
655 return; 685 goto out;
656 } 686 }
657 687
658 /* 688 /*
@@ -662,7 +692,7 @@ void mmc_rescan(struct work_struct *work)
662 if (!err) { 692 if (!err) {
663 if (mmc_attach_sd(host, ocr)) 693 if (mmc_attach_sd(host, ocr))
664 mmc_power_off(host); 694 mmc_power_off(host);
665 return; 695 goto out;
666 } 696 }
667 697
668 /* 698 /*
@@ -672,7 +702,7 @@ void mmc_rescan(struct work_struct *work)
672 if (!err) { 702 if (!err) {
673 if (mmc_attach_mmc(host, ocr)) 703 if (mmc_attach_mmc(host, ocr))
674 mmc_power_off(host); 704 mmc_power_off(host);
675 return; 705 goto out;
676 } 706 }
677 707
678 mmc_release_host(host); 708 mmc_release_host(host);
@@ -683,6 +713,9 @@ void mmc_rescan(struct work_struct *work)
683 713
684 mmc_bus_put(host); 714 mmc_bus_put(host);
685 } 715 }
716out:
717 if (host->caps & MMC_CAP_NEEDS_POLL)
718 mmc_schedule_delayed_work(&host->detect, HZ);
686} 719}
687 720
688void mmc_start_host(struct mmc_host *host) 721void mmc_start_host(struct mmc_host *host)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 3da29eef8f7d..fdd7c760be8c 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -288,7 +288,7 @@ static struct device_type mmc_type = {
288/* 288/*
289 * Handle the detection and initialisation of a card. 289 * Handle the detection and initialisation of a card.
290 * 290 *
291 * In the case of a resume, "curcard" will contain the card 291 * In the case of a resume, "oldcard" will contain the card
292 * we're trying to reinitialise. 292 * we're trying to reinitialise.
293 */ 293 */
294static int mmc_init_card(struct mmc_host *host, u32 ocr, 294static int mmc_init_card(struct mmc_host *host, u32 ocr,
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 7ef3b15c5e3d..26fc098d77cd 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -326,7 +326,7 @@ static struct device_type sd_type = {
326/* 326/*
327 * Handle the detection and initialisation of a card. 327 * Handle the detection and initialisation of a card.
328 * 328 *
329 * In the case of a resume, "curcard" will contain the card 329 * In the case of a resume, "oldcard" will contain the card
330 * we're trying to reinitialise. 330 * we're trying to reinitialise.
331 */ 331 */
332static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, 332static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
@@ -494,13 +494,13 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
494 * Check if read-only switch is active. 494 * Check if read-only switch is active.
495 */ 495 */
496 if (!oldcard) { 496 if (!oldcard) {
497 if (!host->ops->get_ro) { 497 if (!host->ops->get_ro || host->ops->get_ro(host) < 0) {
498 printk(KERN_WARNING "%s: host does not " 498 printk(KERN_WARNING "%s: host does not "
499 "support reading read-only " 499 "support reading read-only "
500 "switch. assuming write-enable.\n", 500 "switch. assuming write-enable.\n",
501 mmc_hostname(host)); 501 mmc_hostname(host));
502 } else { 502 } else {
503 if (host->ops->get_ro(host)) 503 if (host->ops->get_ro(host) > 0)
504 mmc_card_set_readonly(card); 504 mmc_card_set_readonly(card);
505 } 505 }
506 } 506 }
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index d5e51b1c7b3f..956bd7677502 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -129,6 +129,12 @@ static int cistpl_funce_func(struct sdio_func *func,
129 /* TPLFE_MAX_BLK_SIZE */ 129 /* TPLFE_MAX_BLK_SIZE */
130 func->max_blksize = buf[12] | (buf[13] << 8); 130 func->max_blksize = buf[12] | (buf[13] << 8);
131 131
132 /* TPLFE_ENABLE_TIMEOUT_VAL, present in ver 1.1 and above */
133 if (vsn > SDIO_SDIO_REV_1_00)
134 func->enable_timeout = (buf[28] | (buf[29] << 8)) * 10;
135 else
136 func->enable_timeout = jiffies_to_msecs(HZ);
137
132 return 0; 138 return 0;
133} 139}
134 140
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 625b92ce9cef..f61fc2d4cd0a 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/drivers/mmc/core/sdio_io.c 2 * linux/drivers/mmc/core/sdio_io.c
3 * 3 *
4 * Copyright 2007 Pierre Ossman 4 * Copyright 2007-2008 Pierre Ossman
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -76,11 +76,7 @@ int sdio_enable_func(struct sdio_func *func)
76 if (ret) 76 if (ret)
77 goto err; 77 goto err;
78 78
79 /* 79 timeout = jiffies + msecs_to_jiffies(func->enable_timeout);
80 * FIXME: This should timeout based on information in the CIS,
81 * but we don't have card to parse that yet.
82 */
83 timeout = jiffies + HZ;
84 80
85 while (1) { 81 while (1) {
86 ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IORx, 0, &reg); 82 ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IORx, 0, &reg);
@@ -167,10 +163,8 @@ int sdio_set_block_size(struct sdio_func *func, unsigned blksz)
167 return -EINVAL; 163 return -EINVAL;
168 164
169 if (blksz == 0) { 165 if (blksz == 0) {
170 blksz = min(min( 166 blksz = min(func->max_blksize, func->card->host->max_blk_size);
171 func->max_blksize, 167 blksz = min(blksz, 512u);
172 func->card->host->max_blk_size),
173 512u);
174 } 168 }
175 169
176 ret = mmc_io_rw_direct(func->card, 1, 0, 170 ret = mmc_io_rw_direct(func->card, 1, 0,
@@ -186,9 +180,116 @@ int sdio_set_block_size(struct sdio_func *func, unsigned blksz)
186 func->cur_blksize = blksz; 180 func->cur_blksize = blksz;
187 return 0; 181 return 0;
188} 182}
189
190EXPORT_SYMBOL_GPL(sdio_set_block_size); 183EXPORT_SYMBOL_GPL(sdio_set_block_size);
191 184
185/*
186 * Calculate the maximum byte mode transfer size
187 */
188static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
189{
190 unsigned mval = min(func->card->host->max_seg_size,
191 func->card->host->max_blk_size);
192 mval = min(mval, func->max_blksize);
193 return min(mval, 512u); /* maximum size for byte mode */
194}
195
196/**
197 * sdio_align_size - pads a transfer size to a more optimal value
198 * @func: SDIO function
199 * @sz: original transfer size
200 *
201 * Pads the original data size with a number of extra bytes in
202 * order to avoid controller bugs and/or performance hits
203 * (e.g. some controllers revert to PIO for certain sizes).
204 *
205 * If possible, it will also adjust the size so that it can be
206 * handled in just a single request.
207 *
208 * Returns the improved size, which might be unmodified.
209 */
210unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
211{
212 unsigned int orig_sz;
213 unsigned int blk_sz, byte_sz;
214 unsigned chunk_sz;
215
216 orig_sz = sz;
217
218 /*
219 * Do a first check with the controller, in case it
220 * wants to increase the size up to a point where it
221 * might need more than one block.
222 */
223 sz = mmc_align_data_size(func->card, sz);
224
225 /*
226 * If we can still do this with just a byte transfer, then
227 * we're done.
228 */
229 if (sz <= sdio_max_byte_size(func))
230 return sz;
231
232 if (func->card->cccr.multi_block) {
233 /*
234 * Check if the transfer is already block aligned
235 */
236 if ((sz % func->cur_blksize) == 0)
237 return sz;
238
239 /*
240 * Realign it so that it can be done with one request,
241 * and recheck if the controller still likes it.
242 */
243 blk_sz = ((sz + func->cur_blksize - 1) /
244 func->cur_blksize) * func->cur_blksize;
245 blk_sz = mmc_align_data_size(func->card, blk_sz);
246
247 /*
248 * This value is only good if it is still just
249 * one request.
250 */
251 if ((blk_sz % func->cur_blksize) == 0)
252 return blk_sz;
253
254 /*
255 * We failed to do one request, but at least try to
256 * pad the remainder properly.
257 */
258 byte_sz = mmc_align_data_size(func->card,
259 sz % func->cur_blksize);
260 if (byte_sz <= sdio_max_byte_size(func)) {
261 blk_sz = sz / func->cur_blksize;
262 return blk_sz * func->cur_blksize + byte_sz;
263 }
264 } else {
265 /*
266 * We need multiple requests, so first check that the
267 * controller can handle the chunk size;
268 */
269 chunk_sz = mmc_align_data_size(func->card,
270 sdio_max_byte_size(func));
271 if (chunk_sz == sdio_max_byte_size(func)) {
272 /*
273 * Fix up the size of the remainder (if any)
274 */
275 byte_sz = orig_sz % chunk_sz;
276 if (byte_sz) {
277 byte_sz = mmc_align_data_size(func->card,
278 byte_sz);
279 }
280
281 return (orig_sz / chunk_sz) * chunk_sz + byte_sz;
282 }
283 }
284
285 /*
286 * The controller is simply incapable of transferring the size
287 * we want in decent manner, so just return the original size.
288 */
289 return orig_sz;
290}
291EXPORT_SYMBOL_GPL(sdio_align_size);
292
192/* Split an arbitrarily sized data transfer into several 293/* Split an arbitrarily sized data transfer into several
193 * IO_RW_EXTENDED commands. */ 294 * IO_RW_EXTENDED commands. */
194static int sdio_io_rw_ext_helper(struct sdio_func *func, int write, 295static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
@@ -199,14 +300,13 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
199 int ret; 300 int ret;
200 301
201 /* Do the bulk of the transfer using block mode (if supported). */ 302 /* Do the bulk of the transfer using block mode (if supported). */
202 if (func->card->cccr.multi_block) { 303 if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) {
203 /* Blocks per command is limited by host count, host transfer 304 /* Blocks per command is limited by host count, host transfer
204 * size (we only use a single sg entry) and the maximum for 305 * size (we only use a single sg entry) and the maximum for
205 * IO_RW_EXTENDED of 511 blocks. */ 306 * IO_RW_EXTENDED of 511 blocks. */
206 max_blocks = min(min( 307 max_blocks = min(func->card->host->max_blk_count,
207 func->card->host->max_blk_count, 308 func->card->host->max_seg_size / func->cur_blksize);
208 func->card->host->max_seg_size / func->cur_blksize), 309 max_blocks = min(max_blocks, 511u);
209 511u);
210 310
211 while (remainder > func->cur_blksize) { 311 while (remainder > func->cur_blksize) {
212 unsigned blocks; 312 unsigned blocks;
@@ -231,11 +331,7 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
231 331
232 /* Write the remainder using byte mode. */ 332 /* Write the remainder using byte mode. */
233 while (remainder > 0) { 333 while (remainder > 0) {
234 size = remainder; 334 size = min(remainder, sdio_max_byte_size(func));
235 if (size > func->cur_blksize)
236 size = func->cur_blksize;
237 if (size > 512)
238 size = 512; /* maximum size for byte mode */
239 335
240 ret = mmc_io_rw_extended(func->card, write, func->num, addr, 336 ret = mmc_io_rw_extended(func->card, write, func->num, addr,
241 incr_addr, buf, 1, size); 337 incr_addr, buf, 1, size);
@@ -260,11 +356,10 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
260 * function. If there is a problem reading the address, 0xff 356 * function. If there is a problem reading the address, 0xff
261 * is returned and @err_ret will contain the error code. 357 * is returned and @err_ret will contain the error code.
262 */ 358 */
263unsigned char sdio_readb(struct sdio_func *func, unsigned int addr, 359u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
264 int *err_ret)
265{ 360{
266 int ret; 361 int ret;
267 unsigned char val; 362 u8 val;
268 363
269 BUG_ON(!func); 364 BUG_ON(!func);
270 365
@@ -293,8 +388,7 @@ EXPORT_SYMBOL_GPL(sdio_readb);
293 * function. @err_ret will contain the status of the actual 388 * function. @err_ret will contain the status of the actual
294 * transfer. 389 * transfer.
295 */ 390 */
296void sdio_writeb(struct sdio_func *func, unsigned char b, unsigned int addr, 391void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
297 int *err_ret)
298{ 392{
299 int ret; 393 int ret;
300 394
@@ -355,7 +449,6 @@ int sdio_readsb(struct sdio_func *func, void *dst, unsigned int addr,
355{ 449{
356 return sdio_io_rw_ext_helper(func, 0, addr, 0, dst, count); 450 return sdio_io_rw_ext_helper(func, 0, addr, 0, dst, count);
357} 451}
358
359EXPORT_SYMBOL_GPL(sdio_readsb); 452EXPORT_SYMBOL_GPL(sdio_readsb);
360 453
361/** 454/**
@@ -385,8 +478,7 @@ EXPORT_SYMBOL_GPL(sdio_writesb);
385 * function. If there is a problem reading the address, 0xffff 478 * function. If there is a problem reading the address, 0xffff
386 * is returned and @err_ret will contain the error code. 479 * is returned and @err_ret will contain the error code.
387 */ 480 */
388unsigned short sdio_readw(struct sdio_func *func, unsigned int addr, 481u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret)
389 int *err_ret)
390{ 482{
391 int ret; 483 int ret;
392 484
@@ -400,7 +492,7 @@ unsigned short sdio_readw(struct sdio_func *func, unsigned int addr,
400 return 0xFFFF; 492 return 0xFFFF;
401 } 493 }
402 494
403 return le16_to_cpu(*(u16*)func->tmpbuf); 495 return le16_to_cpup((__le16 *)func->tmpbuf);
404} 496}
405EXPORT_SYMBOL_GPL(sdio_readw); 497EXPORT_SYMBOL_GPL(sdio_readw);
406 498
@@ -415,12 +507,11 @@ EXPORT_SYMBOL_GPL(sdio_readw);
415 * function. @err_ret will contain the status of the actual 507 * function. @err_ret will contain the status of the actual
416 * transfer. 508 * transfer.
417 */ 509 */
418void sdio_writew(struct sdio_func *func, unsigned short b, unsigned int addr, 510void sdio_writew(struct sdio_func *func, u16 b, unsigned int addr, int *err_ret)
419 int *err_ret)
420{ 511{
421 int ret; 512 int ret;
422 513
423 *(u16*)func->tmpbuf = cpu_to_le16(b); 514 *(__le16 *)func->tmpbuf = cpu_to_le16(b);
424 515
425 ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 2); 516 ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 2);
426 if (err_ret) 517 if (err_ret)
@@ -439,8 +530,7 @@ EXPORT_SYMBOL_GPL(sdio_writew);
439 * 0xffffffff is returned and @err_ret will contain the error 530 * 0xffffffff is returned and @err_ret will contain the error
440 * code. 531 * code.
441 */ 532 */
442unsigned long sdio_readl(struct sdio_func *func, unsigned int addr, 533u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret)
443 int *err_ret)
444{ 534{
445 int ret; 535 int ret;
446 536
@@ -454,7 +544,7 @@ unsigned long sdio_readl(struct sdio_func *func, unsigned int addr,
454 return 0xFFFFFFFF; 544 return 0xFFFFFFFF;
455 } 545 }
456 546
457 return le32_to_cpu(*(u32*)func->tmpbuf); 547 return le32_to_cpup((__le32 *)func->tmpbuf);
458} 548}
459EXPORT_SYMBOL_GPL(sdio_readl); 549EXPORT_SYMBOL_GPL(sdio_readl);
460 550
@@ -469,12 +559,11 @@ EXPORT_SYMBOL_GPL(sdio_readl);
469 * function. @err_ret will contain the status of the actual 559 * function. @err_ret will contain the status of the actual
470 * transfer. 560 * transfer.
471 */ 561 */
472void sdio_writel(struct sdio_func *func, unsigned long b, unsigned int addr, 562void sdio_writel(struct sdio_func *func, u32 b, unsigned int addr, int *err_ret)
473 int *err_ret)
474{ 563{
475 int ret; 564 int ret;
476 565
477 *(u32*)func->tmpbuf = cpu_to_le32(b); 566 *(__le32 *)func->tmpbuf = cpu_to_le32(b);
478 567
479 ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 4); 568 ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 4);
480 if (err_ret) 569 if (err_ret)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index dead61754ad7..dc6f2579f85c 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -26,18 +26,31 @@ config MMC_PXA
26 26
27config MMC_SDHCI 27config MMC_SDHCI
28 tristate "Secure Digital Host Controller Interface support" 28 tristate "Secure Digital Host Controller Interface support"
29 depends on PCI 29 depends on HAS_DMA
30 help 30 help
31 This select the generic Secure Digital Host Controller Interface. 31 This selects the generic Secure Digital Host Controller Interface.
32 It is used by manufacturers such as Texas Instruments(R), Ricoh(R) 32 It is used by manufacturers such as Texas Instruments(R), Ricoh(R)
33 and Toshiba(R). Most controllers found in laptops are of this type. 33 and Toshiba(R). Most controllers found in laptops are of this type.
34
35 If you have a controller with this interface, say Y or M here. You
36 also need to enable an appropriate bus interface.
37
38 If unsure, say N.
39
40config MMC_SDHCI_PCI
41 tristate "SDHCI support on PCI bus"
42 depends on MMC_SDHCI && PCI
43 help
44 This selects the PCI Secure Digital Host Controller Interface.
45 Most controllers found today are PCI devices.
46
34 If you have a controller with this interface, say Y or M here. 47 If you have a controller with this interface, say Y or M here.
35 48
36 If unsure, say N. 49 If unsure, say N.
37 50
38config MMC_RICOH_MMC 51config MMC_RICOH_MMC
39 tristate "Ricoh MMC Controller Disabler (EXPERIMENTAL)" 52 tristate "Ricoh MMC Controller Disabler (EXPERIMENTAL)"
40 depends on PCI && EXPERIMENTAL && MMC_SDHCI 53 depends on MMC_SDHCI_PCI
41 help 54 help
42 This selects the disabler for the Ricoh MMC Controller. This 55 This selects the disabler for the Ricoh MMC Controller. This
43 proprietary controller is unnecessary because the SDHCI driver 56 proprietary controller is unnecessary because the SDHCI driver
@@ -91,6 +104,16 @@ config MMC_AT91
91 104
92 If unsure, say N. 105 If unsure, say N.
93 106
107config MMC_ATMELMCI
108 tristate "Atmel Multimedia Card Interface support"
109 depends on AVR32
110 help
111 This selects the Atmel Multimedia Card Interface driver. If
112 you have an AT32 (AVR32) platform with a Multimedia Card
113 slot, say Y or M here.
114
115 If unsure, say N.
116
94config MMC_IMX 117config MMC_IMX
95 tristate "Motorola i.MX Multimedia Card Interface support" 118 tristate "Motorola i.MX Multimedia Card Interface support"
96 depends on ARCH_IMX 119 depends on ARCH_IMX
@@ -130,3 +153,24 @@ config MMC_SPI
130 153
131 If unsure, or if your system has no SPI master driver, say N. 154 If unsure, or if your system has no SPI master driver, say N.
132 155
156config MMC_S3C
157 tristate "Samsung S3C SD/MMC Card Interface support"
158 depends on ARCH_S3C2410 && MMC
159 help
160 This selects a driver for the MCI interface found in
161 Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs.
162 If you have a board based on one of those and a MMC/SD
163 slot, say Y or M here.
164
165 If unsure, say N.
166
167config MMC_SDRICOH_CS
168 tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)"
169 depends on EXPERIMENTAL && MMC && PCI && PCMCIA
170 help
171 Say Y here if your Notebook reports a Ricoh Bay1Controller PCMCIA
172 card whenever you insert a MMC or SD card into the card slot.
173
174 To compile this driver as a module, choose M here: the
175 module will be called sdricoh_cs.
176
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 3877c87e6da2..db52eebfb50e 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -10,11 +10,15 @@ obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
10obj-$(CONFIG_MMC_PXA) += pxamci.o 10obj-$(CONFIG_MMC_PXA) += pxamci.o
11obj-$(CONFIG_MMC_IMX) += imxmmc.o 11obj-$(CONFIG_MMC_IMX) += imxmmc.o
12obj-$(CONFIG_MMC_SDHCI) += sdhci.o 12obj-$(CONFIG_MMC_SDHCI) += sdhci.o
13obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
13obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o 14obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
14obj-$(CONFIG_MMC_WBSD) += wbsd.o 15obj-$(CONFIG_MMC_WBSD) += wbsd.o
15obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 16obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
16obj-$(CONFIG_MMC_OMAP) += omap.o 17obj-$(CONFIG_MMC_OMAP) += omap.o
17obj-$(CONFIG_MMC_AT91) += at91_mci.o 18obj-$(CONFIG_MMC_AT91) += at91_mci.o
19obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
18obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o 20obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
19obj-$(CONFIG_MMC_SPI) += mmc_spi.o 21obj-$(CONFIG_MMC_SPI) += mmc_spi.o
22obj-$(CONFIG_MMC_S3C) += s3cmci.o
23obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
20 24
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 8979ad330a4d..f15e2064305c 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -125,9 +125,72 @@ struct at91mci_host
125 125
126 /* Latest in the scatterlist that has been enabled for transfer */ 126 /* Latest in the scatterlist that has been enabled for transfer */
127 int transfer_index; 127 int transfer_index;
128
129 /* Timer for timeouts */
130 struct timer_list timer;
128}; 131};
129 132
130/* 133/*
134 * Reset the controller and restore most of the state
135 */
136static void at91_reset_host(struct at91mci_host *host)
137{
138 unsigned long flags;
139 u32 mr;
140 u32 sdcr;
141 u32 dtor;
142 u32 imr;
143
144 local_irq_save(flags);
145 imr = at91_mci_read(host, AT91_MCI_IMR);
146
147 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
148
149 /* save current state */
150 mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
151 sdcr = at91_mci_read(host, AT91_MCI_SDCR);
152 dtor = at91_mci_read(host, AT91_MCI_DTOR);
153
154 /* reset the controller */
155 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
156
157 /* restore state */
158 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
159 at91_mci_write(host, AT91_MCI_MR, mr);
160 at91_mci_write(host, AT91_MCI_SDCR, sdcr);
161 at91_mci_write(host, AT91_MCI_DTOR, dtor);
162 at91_mci_write(host, AT91_MCI_IER, imr);
163
164 /* make sure sdio interrupts will fire */
165 at91_mci_read(host, AT91_MCI_SR);
166
167 local_irq_restore(flags);
168}
169
170static void at91_timeout_timer(unsigned long data)
171{
172 struct at91mci_host *host;
173
174 host = (struct at91mci_host *)data;
175
176 if (host->request) {
177 dev_err(host->mmc->parent, "Timeout waiting end of packet\n");
178
179 if (host->cmd && host->cmd->data) {
180 host->cmd->data->error = -ETIMEDOUT;
181 } else {
182 if (host->cmd)
183 host->cmd->error = -ETIMEDOUT;
184 else
185 host->request->cmd->error = -ETIMEDOUT;
186 }
187
188 at91_reset_host(host);
189 mmc_request_done(host->mmc, host->request);
190 }
191}
192
193/*
131 * Copy from sg to a dma block - used for transfers 194 * Copy from sg to a dma block - used for transfers
132 */ 195 */
133static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data) 196static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
@@ -135,9 +198,14 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
135 unsigned int len, i, size; 198 unsigned int len, i, size;
136 unsigned *dmabuf = host->buffer; 199 unsigned *dmabuf = host->buffer;
137 200
138 size = host->total_length; 201 size = data->blksz * data->blocks;
139 len = data->sg_len; 202 len = data->sg_len;
140 203
204 /* AT91SAM926[0/3] Data Write Operation and number of bytes erratum */
205 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
206 if (host->total_length == 12)
207 memset(dmabuf, 0, 12);
208
141 /* 209 /*
142 * Just loop through all entries. Size might not 210 * Just loop through all entries. Size might not
143 * be the entire list though so make sure that 211 * be the entire list though so make sure that
@@ -159,9 +227,10 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
159 227
160 for (index = 0; index < (amount / 4); index++) 228 for (index = 0; index < (amount / 4); index++)
161 *dmabuf++ = swab32(sgbuffer[index]); 229 *dmabuf++ = swab32(sgbuffer[index]);
162 } 230 } else {
163 else
164 memcpy(dmabuf, sgbuffer, amount); 231 memcpy(dmabuf, sgbuffer, amount);
232 dmabuf += amount;
233 }
165 234
166 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); 235 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
167 236
@@ -233,11 +302,11 @@ static void at91_mci_pre_dma_read(struct at91mci_host *host)
233 302
234 if (i == 0) { 303 if (i == 0) {
235 at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address); 304 at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
236 at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4); 305 at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
237 } 306 }
238 else { 307 else {
239 at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address); 308 at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
240 at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4); 309 at91_mci_write(host, ATMEL_PDC_RNCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
241 } 310 }
242 } 311 }
243 312
@@ -277,8 +346,6 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
277 346
278 dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE); 347 dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);
279 348
280 data->bytes_xfered += sg->length;
281
282 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */ 349 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
283 unsigned int *buffer; 350 unsigned int *buffer;
284 int index; 351 int index;
@@ -294,6 +361,8 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
294 } 361 }
295 362
296 flush_dcache_page(sg_page(sg)); 363 flush_dcache_page(sg_page(sg));
364
365 data->bytes_xfered += sg->length;
297 } 366 }
298 367
299 /* Is there another transfer to trigger? */ 368 /* Is there another transfer to trigger? */
@@ -334,10 +403,32 @@ static void at91_mci_handle_transmitted(struct at91mci_host *host)
334 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE); 403 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
335 } else 404 } else
336 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); 405 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
406}
407
408/*
409 * Update bytes tranfered count during a write operation
410 */
411static void at91_mci_update_bytes_xfered(struct at91mci_host *host)
412{
413 struct mmc_data *data;
414
415 /* always deal with the effective request (and not the current cmd) */
416
417 if (host->request->cmd && host->request->cmd->error != 0)
418 return;
337 419
338 data->bytes_xfered = host->total_length; 420 if (host->request->data) {
421 data = host->request->data;
422 if (data->flags & MMC_DATA_WRITE) {
423 /* card is in IDLE mode now */
424 pr_debug("-> bytes_xfered %d, total_length = %d\n",
425 data->bytes_xfered, host->total_length);
426 data->bytes_xfered = data->blksz * data->blocks;
427 }
428 }
339} 429}
340 430
431
341/*Handle after command sent ready*/ 432/*Handle after command sent ready*/
342static int at91_mci_handle_cmdrdy(struct at91mci_host *host) 433static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
343{ 434{
@@ -350,8 +441,7 @@ static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
350 } else return 1; 441 } else return 1;
351 } else if (host->cmd->data->flags & MMC_DATA_WRITE) { 442 } else if (host->cmd->data->flags & MMC_DATA_WRITE) {
352 /*After sendding multi-block-write command, start DMA transfer*/ 443 /*After sendding multi-block-write command, start DMA transfer*/
353 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE); 444 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE);
354 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
355 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 445 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
356 } 446 }
357 447
@@ -430,11 +520,19 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
430 520
431 if (data) { 521 if (data) {
432 522
433 if ( data->blksz & 0x3 ) { 523 if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) {
434 pr_debug("Unsupported block size\n"); 524 if (data->blksz & 0x3) {
435 cmd->error = -EINVAL; 525 pr_debug("Unsupported block size\n");
436 mmc_request_done(host->mmc, host->request); 526 cmd->error = -EINVAL;
437 return; 527 mmc_request_done(host->mmc, host->request);
528 return;
529 }
530 if (data->flags & MMC_DATA_STREAM) {
531 pr_debug("Stream commands not supported\n");
532 cmd->error = -EINVAL;
533 mmc_request_done(host->mmc, host->request);
534 return;
535 }
438 } 536 }
439 537
440 block_length = data->blksz; 538 block_length = data->blksz;
@@ -481,8 +579,16 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
481 ier = AT91_MCI_CMDRDY; 579 ier = AT91_MCI_CMDRDY;
482 } else { 580 } else {
483 /* zero block length and PDC mode */ 581 /* zero block length and PDC mode */
484 mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; 582 mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff;
485 at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE); 583 mr |= (data->blksz & 0x3) ? AT91_MCI_PDCFBYTE : 0;
584 mr |= (block_length << 16);
585 mr |= AT91_MCI_PDCMODE;
586 at91_mci_write(host, AT91_MCI_MR, mr);
587
588 if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261()))
589 at91_mci_write(host, AT91_MCI_BLKR,
590 AT91_MCI_BLKR_BCNT(blocks) |
591 AT91_MCI_BLKR_BLKLEN(block_length));
486 592
487 /* 593 /*
488 * Disable the PDC controller 594 * Disable the PDC controller
@@ -508,6 +614,13 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
508 * Handle a write 614 * Handle a write
509 */ 615 */
510 host->total_length = block_length * blocks; 616 host->total_length = block_length * blocks;
617 /*
618 * AT91SAM926[0/3] Data Write Operation and
619 * number of bytes erratum
620 */
621 if (cpu_is_at91sam9260 () || cpu_is_at91sam9263())
622 if (host->total_length < 12)
623 host->total_length = 12;
511 host->buffer = dma_alloc_coherent(NULL, 624 host->buffer = dma_alloc_coherent(NULL,
512 host->total_length, 625 host->total_length,
513 &host->physical_address, GFP_KERNEL); 626 &host->physical_address, GFP_KERNEL);
@@ -517,7 +630,9 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
517 pr_debug("Transmitting %d bytes\n", host->total_length); 630 pr_debug("Transmitting %d bytes\n", host->total_length);
518 631
519 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address); 632 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
520 at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4); 633 at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ?
634 host->total_length : host->total_length / 4);
635
521 ier = AT91_MCI_CMDRDY; 636 ier = AT91_MCI_CMDRDY;
522 } 637 }
523 } 638 }
@@ -552,20 +667,26 @@ static void at91_mci_process_next(struct at91mci_host *host)
552 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) { 667 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
553 host->flags |= FL_SENT_STOP; 668 host->flags |= FL_SENT_STOP;
554 at91_mci_send_command(host, host->request->stop); 669 at91_mci_send_command(host, host->request->stop);
555 } 670 } else {
556 else 671 del_timer(&host->timer);
672 /* the at91rm9200 mci controller hangs after some transfers,
673 * and the workaround is to reset it after each transfer.
674 */
675 if (cpu_is_at91rm9200())
676 at91_reset_host(host);
557 mmc_request_done(host->mmc, host->request); 677 mmc_request_done(host->mmc, host->request);
678 }
558} 679}
559 680
560/* 681/*
561 * Handle a command that has been completed 682 * Handle a command that has been completed
562 */ 683 */
563static void at91_mci_completed_command(struct at91mci_host *host) 684static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status)
564{ 685{
565 struct mmc_command *cmd = host->cmd; 686 struct mmc_command *cmd = host->cmd;
566 unsigned int status; 687 struct mmc_data *data = cmd->data;
567 688
568 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); 689 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
569 690
570 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0)); 691 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
571 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1)); 692 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
@@ -577,25 +698,34 @@ static void at91_mci_completed_command(struct at91mci_host *host)
577 host->buffer = NULL; 698 host->buffer = NULL;
578 } 699 }
579 700
580 status = at91_mci_read(host, AT91_MCI_SR); 701 pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n",
581 702 status, at91_mci_read(host, AT91_MCI_SR),
582 pr_debug("Status = %08X [%08X %08X %08X %08X]\n", 703 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
583 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
584 704
585 if (status & AT91_MCI_ERRORS) { 705 if (status & AT91_MCI_ERRORS) {
586 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) { 706 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
587 cmd->error = 0; 707 cmd->error = 0;
588 } 708 }
589 else { 709 else {
590 if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE)) 710 if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) {
591 cmd->error = -ETIMEDOUT; 711 if (data) {
592 else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE)) 712 if (status & AT91_MCI_DTOE)
593 cmd->error = -EILSEQ; 713 data->error = -ETIMEDOUT;
594 else 714 else if (status & AT91_MCI_DCRCE)
595 cmd->error = -EIO; 715 data->error = -EILSEQ;
716 }
717 } else {
718 if (status & AT91_MCI_RTOE)
719 cmd->error = -ETIMEDOUT;
720 else if (status & AT91_MCI_RCRCE)
721 cmd->error = -EILSEQ;
722 else
723 cmd->error = -EIO;
724 }
596 725
597 pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n", 726 pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n",
598 cmd->error, cmd->opcode, cmd->retries); 727 cmd->error, data ? data->error : 0,
728 cmd->opcode, cmd->retries);
599 } 729 }
600 } 730 }
601 else 731 else
@@ -613,6 +743,8 @@ static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
613 host->request = mrq; 743 host->request = mrq;
614 host->flags = 0; 744 host->flags = 0;
615 745
746 mod_timer(&host->timer, jiffies + HZ);
747
616 at91_mci_process_next(host); 748 at91_mci_process_next(host);
617} 749}
618 750
@@ -736,6 +868,7 @@ static irqreturn_t at91_mci_irq(int irq, void *devid)
736 868
737 if (int_status & AT91_MCI_NOTBUSY) { 869 if (int_status & AT91_MCI_NOTBUSY) {
738 pr_debug("Card is ready\n"); 870 pr_debug("Card is ready\n");
871 at91_mci_update_bytes_xfered(host);
739 completed = 1; 872 completed = 1;
740 } 873 }
741 874
@@ -744,9 +877,21 @@ static irqreturn_t at91_mci_irq(int irq, void *devid)
744 877
745 if (int_status & AT91_MCI_BLKE) { 878 if (int_status & AT91_MCI_BLKE) {
746 pr_debug("Block transfer has ended\n"); 879 pr_debug("Block transfer has ended\n");
747 completed = 1; 880 if (host->request->data && host->request->data->blocks > 1) {
881 /* multi block write : complete multi write
882 * command and send stop */
883 completed = 1;
884 } else {
885 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
886 }
748 } 887 }
749 888
889 if (int_status & AT91_MCI_SDIOIRQA)
890 mmc_signal_sdio_irq(host->mmc);
891
892 if (int_status & AT91_MCI_SDIOIRQB)
893 mmc_signal_sdio_irq(host->mmc);
894
750 if (int_status & AT91_MCI_TXRDY) 895 if (int_status & AT91_MCI_TXRDY)
751 pr_debug("Ready to transmit\n"); 896 pr_debug("Ready to transmit\n");
752 897
@@ -761,10 +906,10 @@ static irqreturn_t at91_mci_irq(int irq, void *devid)
761 906
762 if (completed) { 907 if (completed) {
763 pr_debug("Completed command\n"); 908 pr_debug("Completed command\n");
764 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); 909 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
765 at91_mci_completed_command(host); 910 at91_mci_completed_command(host, int_status);
766 } else 911 } else
767 at91_mci_write(host, AT91_MCI_IDR, int_status); 912 at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
768 913
769 return IRQ_HANDLED; 914 return IRQ_HANDLED;
770} 915}
@@ -793,25 +938,33 @@ static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
793 938
794static int at91_mci_get_ro(struct mmc_host *mmc) 939static int at91_mci_get_ro(struct mmc_host *mmc)
795{ 940{
796 int read_only = 0;
797 struct at91mci_host *host = mmc_priv(mmc); 941 struct at91mci_host *host = mmc_priv(mmc);
798 942
799 if (host->board->wp_pin) { 943 if (host->board->wp_pin)
800 read_only = gpio_get_value(host->board->wp_pin); 944 return !!gpio_get_value(host->board->wp_pin);
801 printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc), 945 /*
802 (read_only ? "read-only" : "read-write") ); 946 * Board doesn't support read only detection; let the mmc core
803 } 947 * decide what to do.
804 else { 948 */
805 printk(KERN_WARNING "%s: host does not support reading read-only " 949 return -ENOSYS;
806 "switch. Assuming write-enable.\n", mmc_hostname(mmc)); 950}
807 } 951
808 return read_only; 952static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable)
953{
954 struct at91mci_host *host = mmc_priv(mmc);
955
956 pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc),
957 host->board->slot_b ? 'B':'A', enable ? "enable" : "disable");
958 at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR,
959 host->board->slot_b ? AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA);
960
809} 961}
810 962
811static const struct mmc_host_ops at91_mci_ops = { 963static const struct mmc_host_ops at91_mci_ops = {
812 .request = at91_mci_request, 964 .request = at91_mci_request,
813 .set_ios = at91_mci_set_ios, 965 .set_ios = at91_mci_set_ios,
814 .get_ro = at91_mci_get_ro, 966 .get_ro = at91_mci_get_ro,
967 .enable_sdio_irq = at91_mci_enable_sdio_irq,
815}; 968};
816 969
817/* 970/*
@@ -842,6 +995,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
842 mmc->f_min = 375000; 995 mmc->f_min = 375000;
843 mmc->f_max = 25000000; 996 mmc->f_max = 25000000;
844 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 997 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
998 mmc->caps = MMC_CAP_SDIO_IRQ;
845 999
846 mmc->max_blk_size = 4095; 1000 mmc->max_blk_size = 4095;
847 mmc->max_blk_count = mmc->max_req_size; 1001 mmc->max_blk_count = mmc->max_req_size;
@@ -935,6 +1089,8 @@ static int __init at91_mci_probe(struct platform_device *pdev)
935 1089
936 mmc_add_host(mmc); 1090 mmc_add_host(mmc);
937 1091
1092 setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host);
1093
938 /* 1094 /*
939 * monitor card insertion/removal if we can 1095 * monitor card insertion/removal if we can
940 */ 1096 */
@@ -995,6 +1151,7 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
995 } 1151 }
996 1152
997 at91_mci_disable(host); 1153 at91_mci_disable(host);
1154 del_timer_sync(&host->timer);
998 mmc_remove_host(mmc); 1155 mmc_remove_host(mmc);
999 free_irq(host->irq, host); 1156 free_irq(host->irq, host);
1000 1157
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
new file mode 100644
index 000000000000..a9a5657706c6
--- /dev/null
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -0,0 +1,91 @@
/*
 * Atmel MultiMedia Card Interface driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __DRIVERS_MMC_ATMEL_MCI_H__
#define __DRIVERS_MMC_ATMEL_MCI_H__

/* MCI Register Definitions (offsets from the controller base address) */
#define MCI_CR			0x0000	/* Control */
# define MCI_CR_MCIEN		(  1 <<  0)	/* MCI Enable */
# define MCI_CR_MCIDIS		(  1 <<  1)	/* MCI Disable */
# define MCI_CR_SWRST		(  1 <<  7)	/* Software Reset */
#define MCI_MR			0x0004	/* Mode */
# define MCI_MR_CLKDIV(x)	((x) <<  0)	/* Clock Divider */
# define MCI_MR_RDPROOF		(  1 << 11)	/* Read Proof */
# define MCI_MR_WRPROOF		(  1 << 12)	/* Write Proof */
#define MCI_DTOR		0x0008	/* Data Timeout */
# define MCI_DTOCYC(x)		((x) <<  0)	/* Data Timeout Cycles */
# define MCI_DTOMUL(x)		((x) <<  4)	/* Data Timeout Multiplier */
#define MCI_SDCR		0x000c	/* SD Card / SDIO */
# define MCI_SDCSEL_SLOT_A	(  0 <<  0)	/* Select SD slot A */
# define MCI_SDCSEL_SLOT_B	(  1 <<  0)	/* Select SD slot B */
# define MCI_SDCBUS_1BIT	(  0 <<  7)	/* 1-bit data bus */
# define MCI_SDCBUS_4BIT	(  1 <<  7)	/* 4-bit data bus */
#define MCI_ARGR		0x0010	/* Command Argument */
#define MCI_CMDR		0x0014	/* Command */
# define MCI_CMDR_CMDNB(x)	((x) <<  0)	/* Command Opcode */
# define MCI_CMDR_RSPTYP_NONE	(  0 <<  6)	/* No response */
# define MCI_CMDR_RSPTYP_48BIT	(  1 <<  6)	/* 48-bit response */
# define MCI_CMDR_RSPTYP_136BIT	(  2 <<  6)	/* 136-bit response */
# define MCI_CMDR_SPCMD_INIT	(  1 <<  8)	/* Initialization command */
# define MCI_CMDR_SPCMD_SYNC	(  2 <<  8)	/* Synchronized command */
# define MCI_CMDR_SPCMD_INT	(  4 <<  8)	/* Interrupt command */
# define MCI_CMDR_SPCMD_INTRESP	(  5 <<  8)	/* Interrupt response */
# define MCI_CMDR_OPDCMD	(  1 << 11)	/* Open Drain */
# define MCI_CMDR_MAXLAT_5CYC	(  0 << 12)	/* Max latency 5 cycles */
# define MCI_CMDR_MAXLAT_64CYC	(  1 << 12)	/* Max latency 64 cycles */
# define MCI_CMDR_START_XFER	(  1 << 16)	/* Start data transfer */
# define MCI_CMDR_STOP_XFER	(  2 << 16)	/* Stop data transfer */
# define MCI_CMDR_TRDIR_WRITE	(  0 << 18)	/* Write data */
# define MCI_CMDR_TRDIR_READ	(  1 << 18)	/* Read data */
# define MCI_CMDR_BLOCK		(  0 << 19)	/* Single-block transfer */
# define MCI_CMDR_MULTI_BLOCK	(  1 << 19)	/* Multi-block transfer */
# define MCI_CMDR_STREAM	(  2 << 19)	/* MMC Stream transfer */
# define MCI_CMDR_SDIO_BYTE	(  4 << 19)	/* SDIO Byte transfer */
# define MCI_CMDR_SDIO_BLOCK	(  5 << 19)	/* SDIO Block transfer */
# define MCI_CMDR_SDIO_SUSPEND	(  1 << 24)	/* SDIO Suspend Command */
# define MCI_CMDR_SDIO_RESUME	(  2 << 24)	/* SDIO Resume Command */
#define MCI_BLKR		0x0018	/* Block */
# define MCI_BCNT(x)		((x) <<  0)	/* Data Block Count */
# define MCI_BLKLEN(x)		((x) << 16)	/* Data Block Length */
#define MCI_RSPR		0x0020	/* Response 0 */
#define MCI_RSPR1		0x0024	/* Response 1 */
#define MCI_RSPR2		0x0028	/* Response 2 */
#define MCI_RSPR3		0x002c	/* Response 3 */
#define MCI_RDR			0x0030	/* Receive Data */
#define MCI_TDR			0x0034	/* Transmit Data */
#define MCI_SR			0x0040	/* Status */
#define MCI_IER			0x0044	/* Interrupt Enable */
#define MCI_IDR			0x0048	/* Interrupt Disable */
#define MCI_IMR			0x004c	/* Interrupt Mask */
/* The bit definitions below apply to SR, IER, IDR and IMR alike */
# define MCI_CMDRDY		(  1 <<  0)	/* Command Ready */
# define MCI_RXRDY		(  1 <<  1)	/* Receiver Ready */
# define MCI_TXRDY		(  1 <<  2)	/* Transmitter Ready */
# define MCI_BLKE		(  1 <<  3)	/* Data Block Ended */
# define MCI_DTIP		(  1 <<  4)	/* Data Transfer In Progress */
# define MCI_NOTBUSY		(  1 <<  5)	/* Data Not Busy */
# define MCI_SDIOIRQA		(  1 <<  8)	/* SDIO IRQ in slot A */
# define MCI_SDIOIRQB		(  1 <<  9)	/* SDIO IRQ in slot B */
# define MCI_RINDE		(  1 << 16)	/* Response Index Error */
# define MCI_RDIRE		(  1 << 17)	/* Response Direction Error */
# define MCI_RCRCE		(  1 << 18)	/* Response CRC Error */
# define MCI_RENDE		(  1 << 19)	/* Response End Bit Error */
# define MCI_RTOE		(  1 << 20)	/* Response Time-Out Error */
# define MCI_DCRCE		(  1 << 21)	/* Data CRC Error */
# define MCI_DTOE		(  1 << 22)	/* Data Time-Out Error */
# define MCI_OVRE		(  1 << 30)	/* RX Overrun Error */
# define MCI_UNRE		(  1 << 31)	/* TX Underrun Error */

/* Register access macros: (port) must have a 'regs' __iomem base pointer */
#define mci_readl(port,reg) \
	__raw_readl((port)->regs + MCI_##reg)
#define mci_writel(port,reg,value) \
	__raw_writel((value), (port)->regs + MCI_##reg)

#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
new file mode 100644
index 000000000000..cce873c5a149
--- /dev/null
+++ b/drivers/mmc/host/atmel-mci.c
@@ -0,0 +1,981 @@
1/*
2 * Atmel MultiMedia Card Interface driver
3 *
4 * Copyright (C) 2004-2008 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/blkdev.h>
11#include <linux/clk.h>
12#include <linux/device.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/ioport.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/scatterlist.h>
19
20#include <linux/mmc/host.h>
21
22#include <asm/atmel-mci.h>
23#include <asm/io.h>
24#include <asm/unaligned.h>
25
26#include <asm/arch/board.h>
27#include <asm/arch/gpio.h>
28
29#include "atmel-mci-regs.h"
30
31#define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE)
32
/*
 * Bit numbers used in host->pending_events and host->completed_events.
 * The IRQ handler sets pending bits; the tasklet moves them to completed.
 */
enum {
	EVENT_CMD_COMPLETE = 0,
	EVENT_DATA_ERROR,
	EVENT_DATA_COMPLETE,
	EVENT_STOP_SENT,
	EVENT_STOP_COMPLETE,
	EVENT_XFER_COMPLETE,
};

/* Per-controller driver state, stored as mmc_priv() of the mmc_host */
struct atmel_mci {
	struct mmc_host		*mmc;
	void __iomem		*regs;		/* mapped MCI register base */

	struct scatterlist	*sg;		/* current PIO scatterlist entry */
	unsigned int		pio_offset;	/* byte offset into *sg */

	struct mmc_request	*mrq;		/* request in flight, or NULL */
	struct mmc_command	*cmd;		/* command in flight, or NULL */
	struct mmc_data		*data;		/* data phase in flight, or NULL */

	/* SR snapshots saved by the IRQ handler for the tasklet */
	u32			cmd_status;
	u32			data_status;
	u32			stop_status;
	u32			stop_cmdr;	/* pre-built CMDR for mrq->stop */

	u32			mode_reg;	/* cached MR value */
	u32			sdc_reg;	/* cached SDCR value */

	struct tasklet_struct	tasklet;
	unsigned long		pending_events;
	unsigned long		completed_events;

	int			present;	/* card currently inserted */
	int			detect_pin;	/* GPIO for card detect, or < 0 */
	int			wp_pin;		/* GPIO for write protect, or < 0 */

	/* For detect pin debouncing */
	struct timer_list	detect_timer;

	unsigned long		bus_hz;		/* rate of mck in Hz */
	unsigned long		mapbase;	/* physical register base */
	struct clk		*mck;
	struct platform_device	*pdev;
};

/* Event bookkeeping helpers over the two event bitmaps above */
#define atmci_is_completed(host, event)				\
	test_bit(event, &host->completed_events)
#define atmci_test_and_clear_pending(host, event)		\
	test_and_clear_bit(event, &host->pending_events)
#define atmci_test_and_set_completed(host, event)		\
	test_and_set_bit(event, &host->completed_events)
#define atmci_set_completed(host, event)			\
	set_bit(event, &host->completed_events)
#define atmci_set_pending(host, event)				\
	set_bit(event, &host->pending_events)
#define atmci_clear_pending(host, event)			\
	clear_bit(event, &host->pending_events)
90
91
/*
 * Power up the controller: ungate the bus clock, enable the interface
 * and restore the cached mode/slot configuration. Counterpart of
 * atmci_disable(); called at the start of each request.
 */
static void atmci_enable(struct atmel_mci *host)
{
	clk_enable(host->mck);
	mci_writel(host, CR, MCI_CR_MCIEN);
	mci_writel(host, MR, host->mode_reg);
	mci_writel(host, SDCR, host->sdc_reg);
}
99
/*
 * Reset the controller and gate its bus clock. The dummy SR read
 * forces the posted register write to complete before the clock is
 * taken away.
 */
static void atmci_disable(struct atmel_mci *host)
{
	mci_writel(host, CR, MCI_CR_SWRST);

	/* Stall until write is complete, then disable the bus clock */
	mci_readl(host, SR);
	clk_disable(host->mck);
}
108
109static inline unsigned int ns_to_clocks(struct atmel_mci *host,
110 unsigned int ns)
111{
112 return (ns * (host->bus_hz / 1000000) + 999) / 1000;
113}
114
115static void atmci_set_timeout(struct atmel_mci *host,
116 struct mmc_data *data)
117{
118 static unsigned dtomul_to_shift[] = {
119 0, 4, 7, 8, 10, 12, 16, 20
120 };
121 unsigned timeout;
122 unsigned dtocyc;
123 unsigned dtomul;
124
125 timeout = ns_to_clocks(host, data->timeout_ns) + data->timeout_clks;
126
127 for (dtomul = 0; dtomul < 8; dtomul++) {
128 unsigned shift = dtomul_to_shift[dtomul];
129 dtocyc = (timeout + (1 << shift) - 1) >> shift;
130 if (dtocyc < 15)
131 break;
132 }
133
134 if (dtomul >= 8) {
135 dtomul = 7;
136 dtocyc = 15;
137 }
138
139 dev_vdbg(&host->mmc->class_dev, "setting timeout to %u cycles\n",
140 dtocyc << dtomul_to_shift[dtomul]);
141 mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc)));
142}
143
144/*
145 * Return mask with command flags to be enabled for this command.
146 */
147static u32 atmci_prepare_command(struct mmc_host *mmc,
148 struct mmc_command *cmd)
149{
150 struct mmc_data *data;
151 u32 cmdr;
152
153 cmd->error = -EINPROGRESS;
154
155 cmdr = MCI_CMDR_CMDNB(cmd->opcode);
156
157 if (cmd->flags & MMC_RSP_PRESENT) {
158 if (cmd->flags & MMC_RSP_136)
159 cmdr |= MCI_CMDR_RSPTYP_136BIT;
160 else
161 cmdr |= MCI_CMDR_RSPTYP_48BIT;
162 }
163
164 /*
165 * This should really be MAXLAT_5 for CMD2 and ACMD41, but
166 * it's too difficult to determine whether this is an ACMD or
167 * not. Better make it 64.
168 */
169 cmdr |= MCI_CMDR_MAXLAT_64CYC;
170
171 if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
172 cmdr |= MCI_CMDR_OPDCMD;
173
174 data = cmd->data;
175 if (data) {
176 cmdr |= MCI_CMDR_START_XFER;
177 if (data->flags & MMC_DATA_STREAM)
178 cmdr |= MCI_CMDR_STREAM;
179 else if (data->blocks > 1)
180 cmdr |= MCI_CMDR_MULTI_BLOCK;
181 else
182 cmdr |= MCI_CMDR_BLOCK;
183
184 if (data->flags & MMC_DATA_READ)
185 cmdr |= MCI_CMDR_TRDIR_READ;
186 }
187
188 return cmdr;
189}
190
/*
 * Issue @cmd: record it as the command in flight, then write the
 * argument and command registers. @cmd_flags is the value built by
 * atmci_prepare_command(). The caller is responsible for unmasking
 * MCI_CMDRDY (and any data interrupts) afterwards.
 */
static void atmci_start_command(struct atmel_mci *host,
		struct mmc_command *cmd,
		u32 cmd_flags)
{
	/* Must read host->cmd after testing event flags */
	smp_rmb();
	WARN_ON(host->cmd);
	host->cmd = cmd;

	dev_vdbg(&host->mmc->class_dev,
		"start command: ARGR=0x%08x CMDR=0x%08x\n",
		cmd->arg, cmd_flags);

	mci_writel(host, ARGR, cmd->arg);
	mci_writel(host, CMDR, cmd_flags);
}
207
/*
 * Issue the stop command for @data using the CMDR value pre-built in
 * host->stop_cmdr by atmci_request(), then unmask CMDRDY so its
 * completion is reported (as EVENT_STOP_COMPLETE).
 */
static void send_stop_cmd(struct mmc_host *mmc, struct mmc_data *data)
{
	struct atmel_mci *host = mmc_priv(mmc);

	atmci_start_command(host, data->stop, host->stop_cmdr);
	mci_writel(host, IER, MCI_CMDRDY);
}
215
/*
 * Finish @mrq: clear the in-flight request, power down the controller
 * and notify the MMC core. Both command and data phases must already
 * be complete (WARN_ON enforces this).
 */
static void atmci_request_end(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct atmel_mci *host = mmc_priv(mmc);

	WARN_ON(host->cmd || host->data);
	host->mrq = NULL;

	atmci_disable(host);

	mmc_request_done(mmc, mrq);
}
227
228/*
229 * Returns a mask of interrupt flags to be enabled after the whole
230 * request has been prepared.
231 */
232static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data)
233{
234 struct atmel_mci *host = mmc_priv(mmc);
235 u32 iflags;
236
237 data->error = -EINPROGRESS;
238
239 WARN_ON(host->data);
240 host->sg = NULL;
241 host->data = data;
242
243 mci_writel(host, BLKR, MCI_BCNT(data->blocks)
244 | MCI_BLKLEN(data->blksz));
245 dev_vdbg(&mmc->class_dev, "BLKR=0x%08x\n",
246 MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
247
248 iflags = ATMCI_DATA_ERROR_FLAGS;
249 host->sg = data->sg;
250 host->pio_offset = 0;
251 if (data->flags & MMC_DATA_READ)
252 iflags |= MCI_RXRDY;
253 else
254 iflags |= MCI_TXRDY;
255
256 return iflags;
257}
258
/*
 * mmc_host_ops.request: start processing @mrq. Prepares and issues the
 * command, sets up the data phase (PIO) if present, pre-builds the stop
 * command, and only then unmasks the interrupts — see the comment near
 * the IER write for why the order matters.
 */
static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct atmel_mci *host = mmc_priv(mmc);
	struct mmc_data *data;
	struct mmc_command *cmd;
	u32 iflags;
	u32 cmdflags = 0;

	/* All interrupts should be masked between requests */
	iflags = mci_readl(host, IMR);
	if (iflags)
		dev_warn(&mmc->class_dev, "WARNING: IMR=0x%08x\n",
				mci_readl(host, IMR));

	WARN_ON(host->mrq != NULL);

	/*
	 * We may "know" the card is gone even though there's still an
	 * electrical connection. If so, we really need to communicate
	 * this to the MMC core since there won't be any more
	 * interrupts as the card is completely removed. Otherwise,
	 * the MMC core might believe the card is still there even
	 * though the card was just removed very slowly.
	 */
	if (!host->present) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->mrq = mrq;
	host->pending_events = 0;
	host->completed_events = 0;

	atmci_enable(host);

	/* We don't support multiple blocks of weird lengths. */
	data = mrq->data;
	if (data) {
		if (data->blocks > 1 && data->blksz & 3)
			goto fail;
		atmci_set_timeout(host, data);
	}

	iflags = MCI_CMDRDY;
	cmd = mrq->cmd;
	cmdflags = atmci_prepare_command(mmc, cmd);
	atmci_start_command(host, cmd, cmdflags);

	if (data)
		iflags |= atmci_submit_data(mmc, data);

	/*
	 * NOTE(review): a stop without a data phase would dereference a
	 * NULL 'data' below. Assumed the MMC core only sets mrq->stop
	 * together with mrq->data — TODO confirm.
	 */
	if (mrq->stop) {
		host->stop_cmdr = atmci_prepare_command(mmc, mrq->stop);
		host->stop_cmdr |= MCI_CMDR_STOP_XFER;
		if (!(data->flags & MMC_DATA_WRITE))
			host->stop_cmdr |= MCI_CMDR_TRDIR_READ;
		if (data->flags & MMC_DATA_STREAM)
			host->stop_cmdr |= MCI_CMDR_STREAM;
		else
			host->stop_cmdr |= MCI_CMDR_MULTI_BLOCK;
	}

	/*
	 * We could have enabled interrupts earlier, but I suspect
	 * that would open up a nice can of interesting race
	 * conditions (e.g. command and data complete, but stop not
	 * prepared yet.)
	 */
	mci_writel(host, IER, iflags);

	return;

fail:
	atmci_disable(host);
	host->mrq = NULL;
	mrq->cmd->error = -EINVAL;
	mmc_request_done(mmc, mrq);
}
337
/*
 * mmc_host_ops.set_ios: cache the clock divider, bus width and (on
 * MMC_POWER_ON) send the 74-clock initialization sequence. The cached
 * MR/SDCR values are only written to the hardware by atmci_enable().
 */
static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct atmel_mci *host = mmc_priv(mmc);

	if (ios->clock) {
		u32 clkdiv;

		/* Set clock rate */
		clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * ios->clock) - 1;
		if (clkdiv > 255) {
			/* CLKDIV is an 8-bit field; clamp and warn */
			dev_warn(&mmc->class_dev,
				"clock %u too slow; using %lu\n",
				ios->clock, host->bus_hz / (2 * 256));
			clkdiv = 255;
		}

		host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF
					| MCI_MR_RDPROOF;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		host->sdc_reg = 0;
		break;
	case MMC_BUS_WIDTH_4:
		host->sdc_reg = MCI_SDCBUS_4BIT;
		break;
	}

	switch (ios->power_mode) {
	case MMC_POWER_ON:
		/* Send init sequence (74 clock cycles) */
		atmci_enable(host);
		mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT);
		/* busy-wait: the init sequence is short and infrequent */
		while (!(mci_readl(host, SR) & MCI_CMDRDY))
			cpu_relax();
		atmci_disable(host);
		break;
	default:
		/*
		 * TODO: None of the currently available AVR32-based
		 * boards allow MMC power to be turned off. Implement
		 * power control when this can be tested properly.
		 */
		break;
	}
}
385
386static int atmci_get_ro(struct mmc_host *mmc)
387{
388 int read_only = 0;
389 struct atmel_mci *host = mmc_priv(mmc);
390
391 if (host->wp_pin >= 0) {
392 read_only = gpio_get_value(host->wp_pin);
393 dev_dbg(&mmc->class_dev, "card is %s\n",
394 read_only ? "read-only" : "read-write");
395 } else {
396 dev_dbg(&mmc->class_dev,
397 "no pin for checking read-only switch."
398 " Assuming write-enable.\n");
399 }
400
401 return read_only;
402}
403
/* Host operations handed to the MMC core via mmc->ops */
static struct mmc_host_ops atmci_ops = {
	.request	= atmci_request,
	.set_ios	= atmci_set_ios,
	.get_ro		= atmci_get_ro,
};
409
410static void atmci_command_complete(struct atmel_mci *host,
411 struct mmc_command *cmd, u32 status)
412{
413 /* Read the response from the card (up to 16 bytes) */
414 cmd->resp[0] = mci_readl(host, RSPR);
415 cmd->resp[1] = mci_readl(host, RSPR);
416 cmd->resp[2] = mci_readl(host, RSPR);
417 cmd->resp[3] = mci_readl(host, RSPR);
418
419 if (status & MCI_RTOE)
420 cmd->error = -ETIMEDOUT;
421 else if ((cmd->flags & MMC_RSP_CRC) && (status & MCI_RCRCE))
422 cmd->error = -EILSEQ;
423 else if (status & (MCI_RINDE | MCI_RDIRE | MCI_RENDE))
424 cmd->error = -EIO;
425 else
426 cmd->error = 0;
427
428 if (cmd->error) {
429 dev_dbg(&host->mmc->class_dev,
430 "command error: status=0x%08x\n", status);
431
432 if (cmd->data) {
433 host->data = NULL;
434 mci_writel(host, IDR, MCI_NOTBUSY
435 | MCI_TXRDY | MCI_RXRDY
436 | ATMCI_DATA_ERROR_FLAGS);
437 }
438 }
439}
440
/*
 * Debounce timer callback for the card-detect GPIO (@data is the
 * struct atmel_mci pointer). Re-enables the detect IRQ, and on a real
 * insertion/removal resets the controller, fails any request that was
 * in flight with -ENOMEDIUM, and notifies the MMC core.
 */
static void atmci_detect_change(unsigned long data)
{
	struct atmel_mci *host = (struct atmel_mci *)data;
	struct mmc_request *mrq = host->mrq;
	int present;

	/*
	 * atmci_remove() sets detect_pin to -1 before freeing the
	 * interrupt. We must not re-enable the interrupt if it has
	 * been freed.
	 */
	smp_rmb();
	if (host->detect_pin < 0)
		return;

	enable_irq(gpio_to_irq(host->detect_pin));
	present = !gpio_get_value(host->detect_pin);

	dev_vdbg(&host->pdev->dev, "detect change: %d (was %d)\n",
			present, host->present);

	if (present != host->present) {
		dev_dbg(&host->mmc->class_dev, "card %s\n",
			present ? "inserted" : "removed");
		host->present = present;

		/* Reset controller if card is gone */
		if (!present) {
			mci_writel(host, CR, MCI_CR_SWRST);
			mci_writel(host, IDR, ~0UL);
			mci_writel(host, CR, MCI_CR_MCIEN);
		}

		/* Clean up queue if present */
		if (mrq) {
			/*
			 * Reset controller to terminate any ongoing
			 * commands or data transfers.
			 */
			mci_writel(host, CR, MCI_CR_SWRST);

			/* fail whichever phases had not yet completed */
			if (!atmci_is_completed(host, EVENT_CMD_COMPLETE))
				mrq->cmd->error = -ENOMEDIUM;

			if (mrq->data && !atmci_is_completed(host,
						EVENT_DATA_COMPLETE)) {
				host->data = NULL;
				mrq->data->error = -ENOMEDIUM;
			}
			if (mrq->stop && !atmci_is_completed(host,
						EVENT_STOP_COMPLETE))
				mrq->stop->error = -ENOMEDIUM;

			host->cmd = NULL;
			atmci_request_end(host->mmc, mrq);
		}

		mmc_detect_change(host->mmc, 0);
	}
}
501
/*
 * Bottom half: consume the pending-event bits set by the IRQ handler,
 * move each to the completed-event bitmap, and perform the follow-up
 * work (finish commands, send the stop command, record data errors).
 * Once neither a command nor a data phase is in flight, the whole
 * request is ended.
 */
static void atmci_tasklet_func(unsigned long priv)
{
	struct mmc_host	*mmc = (struct mmc_host *)priv;
	struct atmel_mci	*host = mmc_priv(mmc);
	struct mmc_request	*mrq = host->mrq;
	struct mmc_data		*data = host->data;

	dev_vdbg(&mmc->class_dev,
		"tasklet: pending/completed/mask %lx/%lx/%x\n",
		host->pending_events, host->completed_events,
		mci_readl(host, IMR));

	if (atmci_test_and_clear_pending(host, EVENT_CMD_COMPLETE)) {
		/*
		 * host->cmd must be set to NULL before the interrupt
		 * handler sees EVENT_CMD_COMPLETE
		 */
		host->cmd = NULL;
		smp_wmb();
		atmci_set_completed(host, EVENT_CMD_COMPLETE);
		atmci_command_complete(host, mrq->cmd, host->cmd_status);

		/* send stop if the data transfer already finished first */
		if (!mrq->cmd->error && mrq->stop
				&& atmci_is_completed(host, EVENT_XFER_COMPLETE)
				&& !atmci_test_and_set_completed(host,
					EVENT_STOP_SENT))
			send_stop_cmd(host->mmc, mrq->data);
	}
	if (atmci_test_and_clear_pending(host, EVENT_STOP_COMPLETE)) {
		/*
		 * host->cmd must be set to NULL before the interrupt
		 * handler sees EVENT_STOP_COMPLETE
		 */
		host->cmd = NULL;
		smp_wmb();
		atmci_set_completed(host, EVENT_STOP_COMPLETE);
		atmci_command_complete(host, mrq->stop, host->stop_status);
	}
	if (atmci_test_and_clear_pending(host, EVENT_DATA_ERROR)) {
		u32 status = host->data_status;

		dev_vdbg(&mmc->class_dev, "data error: status=%08x\n", status);

		/* an errored data phase also counts as complete */
		atmci_set_completed(host, EVENT_DATA_ERROR);
		atmci_set_completed(host, EVENT_DATA_COMPLETE);

		if (status & MCI_DTOE) {
			dev_dbg(&mmc->class_dev,
					"data timeout error\n");
			data->error = -ETIMEDOUT;
		} else if (status & MCI_DCRCE) {
			dev_dbg(&mmc->class_dev, "data CRC error\n");
			data->error = -EILSEQ;
		} else {
			dev_dbg(&mmc->class_dev,
					"data FIFO error (status=%08x)\n",
					status);
			data->error = -EIO;
		}

		/* still send stop so the card returns to transfer state */
		if (host->present && data->stop
				&& atmci_is_completed(host, EVENT_CMD_COMPLETE)
				&& !atmci_test_and_set_completed(
					host, EVENT_STOP_SENT))
			send_stop_cmd(host->mmc, data);

		host->data = NULL;
	}
	if (atmci_test_and_clear_pending(host, EVENT_DATA_COMPLETE)) {
		atmci_set_completed(host, EVENT_DATA_COMPLETE);

		/* don't overwrite the error set by the error path above */
		if (!atmci_is_completed(host, EVENT_DATA_ERROR)) {
			data->bytes_xfered = data->blocks * data->blksz;
			data->error = 0;
		}

		host->data = NULL;
	}

	if (host->mrq && !host->cmd && !host->data)
		atmci_request_end(mmc, host->mrq);
}
584
/*
 * Drain the receive FIFO by PIO: read 32-bit words from RDR into the
 * scatterlist as long as RXRDY stays set. Handles scatterlist entries
 * whose length is not a multiple of 4 by splitting a word across two
 * entries. On a data error, masks the data interrupts and defers to
 * the tasklet; on scatterlist exhaustion, switches to waiting for
 * NOTBUSY and may send the stop command.
 */
static void atmci_read_data_pio(struct atmel_mci *host)
{
	struct scatterlist	*sg = host->sg;
	void			*buf = sg_virt(sg);
	unsigned int		offset = host->pio_offset;
	struct mmc_data		*data = host->data;
	u32			value;
	u32			status;
	unsigned int		nbytes = 0;

	do {
		value = mci_readl(host, RDR);
		if (likely(offset + 4 <= sg->length)) {
			/* whole word fits in the current sg entry */
			put_unaligned(value, (u32 *)(buf + offset));

			offset += 4;
			nbytes += 4;

			if (offset == sg->length) {
				host->sg = sg = sg_next(sg);
				if (!sg)
					goto done;

				offset = 0;
				buf = sg_virt(sg);
			}
		} else {
			/* word straddles the sg entry boundary */
			unsigned int remaining = sg->length - offset;
			memcpy(buf + offset, &value, remaining);
			nbytes += remaining;

			flush_dcache_page(sg_page(sg));
			host->sg = sg = sg_next(sg);
			if (!sg)
				goto done;

			offset = 4 - remaining;
			buf = sg_virt(sg);
			memcpy(buf, (u8 *)&value + remaining, offset);
			nbytes += offset;
		}

		status = mci_readl(host, SR);
		if (status & ATMCI_DATA_ERROR_FLAGS) {
			mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY
						| ATMCI_DATA_ERROR_FLAGS));
			host->data_status = status;
			atmci_set_pending(host, EVENT_DATA_ERROR);
			tasklet_schedule(&host->tasklet);
			break;
		}
	} while (status & MCI_RXRDY);

	/* FIFO empty for now; resume here on the next RXRDY interrupt */
	host->pio_offset = offset;
	data->bytes_xfered += nbytes;

	return;

done:
	/* all data received: stop RXRDY, wait for NOTBUSY instead */
	mci_writel(host, IDR, MCI_RXRDY);
	mci_writel(host, IER, MCI_NOTBUSY);
	data->bytes_xfered += nbytes;
	atmci_set_completed(host, EVENT_XFER_COMPLETE);
	if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE)
			&& !atmci_test_and_set_completed(host, EVENT_STOP_SENT))
		send_stop_cmd(host->mmc, data);
}
652
/*
 * Fill the transmit FIFO by PIO: write 32-bit words from the
 * scatterlist to TDR as long as TXRDY stays set. Mirror image of
 * atmci_read_data_pio(), including the handling of sg entries whose
 * length is not a multiple of 4 and the error/done transitions.
 */
static void atmci_write_data_pio(struct atmel_mci *host)
{
	struct scatterlist	*sg = host->sg;
	void			*buf = sg_virt(sg);
	unsigned int		offset = host->pio_offset;
	struct mmc_data		*data = host->data;
	u32			value;
	u32			status;
	unsigned int		nbytes = 0;

	do {
		if (likely(offset + 4 <= sg->length)) {
			/* whole word available in the current sg entry */
			value = get_unaligned((u32 *)(buf + offset));
			mci_writel(host, TDR, value);

			offset += 4;
			nbytes += 4;
			if (offset == sg->length) {
				host->sg = sg = sg_next(sg);
				if (!sg)
					goto done;

				offset = 0;
				buf = sg_virt(sg);
			}
		} else {
			/* assemble a word from the tail of this entry
			 * and the head of the next one */
			unsigned int remaining = sg->length - offset;

			value = 0;
			memcpy(&value, buf + offset, remaining);
			nbytes += remaining;

			host->sg = sg = sg_next(sg);
			if (!sg) {
				/* last partial word of the transfer */
				mci_writel(host, TDR, value);
				goto done;
			}

			offset = 4 - remaining;
			buf = sg_virt(sg);
			memcpy((u8 *)&value + remaining, buf, offset);
			mci_writel(host, TDR, value);
			nbytes += offset;
		}

		status = mci_readl(host, SR);
		if (status & ATMCI_DATA_ERROR_FLAGS) {
			mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY
						| ATMCI_DATA_ERROR_FLAGS));
			host->data_status = status;
			atmci_set_pending(host, EVENT_DATA_ERROR);
			tasklet_schedule(&host->tasklet);
			break;
		}
	} while (status & MCI_TXRDY);

	/* FIFO full for now; resume here on the next TXRDY interrupt */
	host->pio_offset = offset;
	data->bytes_xfered += nbytes;

	return;

done:
	/* all data queued: stop TXRDY, wait for NOTBUSY instead */
	mci_writel(host, IDR, MCI_TXRDY);
	mci_writel(host, IER, MCI_NOTBUSY);
	data->bytes_xfered += nbytes;
	atmci_set_completed(host, EVENT_XFER_COMPLETE);
	if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE)
			&& !atmci_test_and_set_completed(host, EVENT_STOP_SENT))
		send_stop_cmd(host->mmc, data);
}
723
724static void atmci_cmd_interrupt(struct mmc_host *mmc, u32 status)
725{
726 struct atmel_mci *host = mmc_priv(mmc);
727
728 mci_writel(host, IDR, MCI_CMDRDY);
729
730 if (atmci_is_completed(host, EVENT_STOP_SENT)) {
731 host->stop_status = status;
732 atmci_set_pending(host, EVENT_STOP_COMPLETE);
733 } else {
734 host->cmd_status = status;
735 atmci_set_pending(host, EVENT_CMD_COMPLETE);
736 }
737
738 tasklet_schedule(&host->tasklet);
739}
740
/*
 * Top-half interrupt handler. Loops while unmasked status bits are
 * pending, bounded to a handful of passes so a stuck status bit cannot
 * wedge the CPU. Urgent work (PIO FIFO service) is done here; all
 * completion processing is deferred to the tasklet.
 */
static irqreturn_t atmci_interrupt(int irq, void *dev_id)
{
	struct mmc_host		*mmc = dev_id;
	struct atmel_mci	*host = mmc_priv(mmc);
	u32			status, mask, pending;
	unsigned int		pass_count = 0;

	spin_lock(&mmc->lock);

	do {
		status = mci_readl(host, SR);
		mask = mci_readl(host, IMR);
		pending = status & mask;
		/*
		 * The break below skips the pass_count++ in the while
		 * condition, so a spurious interrupt (nothing pending on
		 * the first pass) leaves pass_count == 0 and the function
		 * returns IRQ_NONE.
		 */
		if (!pending)
			break;

		if (pending & ATMCI_DATA_ERROR_FLAGS) {
			mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS
					| MCI_RXRDY | MCI_TXRDY);
			pending &= mci_readl(host, IMR);
			host->data_status = status;
			atmci_set_pending(host, EVENT_DATA_ERROR);
			tasklet_schedule(&host->tasklet);
		}
		if (pending & MCI_NOTBUSY) {
			mci_writel(host, IDR, (MCI_NOTBUSY
					| ATMCI_DATA_ERROR_FLAGS));
			atmci_set_pending(host, EVENT_DATA_COMPLETE);
			tasklet_schedule(&host->tasklet);
		}
		if (pending & MCI_RXRDY)
			atmci_read_data_pio(host);
		if (pending & MCI_TXRDY)
			atmci_write_data_pio(host);

		if (pending & MCI_CMDRDY)
			atmci_cmd_interrupt(mmc, status);
	} while (pass_count++ < 5);

	spin_unlock(&mmc->lock);

	return pass_count ? IRQ_HANDLED : IRQ_NONE;
}
784
785static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
786{
787 struct mmc_host *mmc = dev_id;
788 struct atmel_mci *host = mmc_priv(mmc);
789
790 /*
791 * Disable interrupts until the pin has stabilized and check
792 * the state then. Use mod_timer() since we may be in the
793 * middle of the timer routine when this interrupt triggers.
794 */
795 disable_irq_nosync(irq);
796 mod_timer(&host->detect_timer, jiffies + msecs_to_jiffies(20));
797
798 return IRQ_HANDLED;
799}
800
801static int __init atmci_probe(struct platform_device *pdev)
802{
803 struct mci_platform_data *pdata;
804 struct atmel_mci *host;
805 struct mmc_host *mmc;
806 struct resource *regs;
807 int irq;
808 int ret;
809
810 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
811 if (!regs)
812 return -ENXIO;
813 pdata = pdev->dev.platform_data;
814 if (!pdata)
815 return -ENXIO;
816 irq = platform_get_irq(pdev, 0);
817 if (irq < 0)
818 return irq;
819
820 mmc = mmc_alloc_host(sizeof(struct atmel_mci), &pdev->dev);
821 if (!mmc)
822 return -ENOMEM;
823
824 host = mmc_priv(mmc);
825 host->pdev = pdev;
826 host->mmc = mmc;
827 host->detect_pin = pdata->detect_pin;
828 host->wp_pin = pdata->wp_pin;
829
830 host->mck = clk_get(&pdev->dev, "mci_clk");
831 if (IS_ERR(host->mck)) {
832 ret = PTR_ERR(host->mck);
833 goto err_clk_get;
834 }
835
836 ret = -ENOMEM;
837 host->regs = ioremap(regs->start, regs->end - regs->start + 1);
838 if (!host->regs)
839 goto err_ioremap;
840
841 clk_enable(host->mck);
842 mci_writel(host, CR, MCI_CR_SWRST);
843 host->bus_hz = clk_get_rate(host->mck);
844 clk_disable(host->mck);
845
846 host->mapbase = regs->start;
847
848 mmc->ops = &atmci_ops;
849 mmc->f_min = (host->bus_hz + 511) / 512;
850 mmc->f_max = host->bus_hz / 2;
851 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
852 mmc->caps |= MMC_CAP_4_BIT_DATA;
853
854 mmc->max_hw_segs = 64;
855 mmc->max_phys_segs = 64;
856 mmc->max_req_size = 32768 * 512;
857 mmc->max_blk_size = 32768;
858 mmc->max_blk_count = 512;
859
860 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)mmc);
861
862 ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, mmc);
863 if (ret)
864 goto err_request_irq;
865
866 /* Assume card is present if we don't have a detect pin */
867 host->present = 1;
868 if (host->detect_pin >= 0) {
869 if (gpio_request(host->detect_pin, "mmc_detect")) {
870 dev_dbg(&mmc->class_dev, "no detect pin available\n");
871 host->detect_pin = -1;
872 } else {
873 host->present = !gpio_get_value(host->detect_pin);
874 }
875 }
876 if (host->wp_pin >= 0) {
877 if (gpio_request(host->wp_pin, "mmc_wp")) {
878 dev_dbg(&mmc->class_dev, "no WP pin available\n");
879 host->wp_pin = -1;
880 }
881 }
882
883 platform_set_drvdata(pdev, host);
884
885 mmc_add_host(mmc);
886
887 if (host->detect_pin >= 0) {
888 setup_timer(&host->detect_timer, atmci_detect_change,
889 (unsigned long)host);
890
891 ret = request_irq(gpio_to_irq(host->detect_pin),
892 atmci_detect_interrupt,
893 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
894 "mmc-detect", mmc);
895 if (ret) {
896 dev_dbg(&mmc->class_dev,
897 "could not request IRQ %d for detect pin\n",
898 gpio_to_irq(host->detect_pin));
899 gpio_free(host->detect_pin);
900 host->detect_pin = -1;
901 }
902 }
903
904 dev_info(&mmc->class_dev,
905 "Atmel MCI controller at 0x%08lx irq %d\n",
906 host->mapbase, irq);
907
908 return 0;
909
910err_request_irq:
911 iounmap(host->regs);
912err_ioremap:
913 clk_put(host->mck);
914err_clk_get:
915 mmc_free_host(mmc);
916 return ret;
917}
918
919static int __exit atmci_remove(struct platform_device *pdev)
920{
921 struct atmel_mci *host = platform_get_drvdata(pdev);
922
923 platform_set_drvdata(pdev, NULL);
924
925 if (host) {
926 if (host->detect_pin >= 0) {
927 int pin = host->detect_pin;
928
929 /* Make sure the timer doesn't enable the interrupt */
930 host->detect_pin = -1;
931 smp_wmb();
932
933 free_irq(gpio_to_irq(pin), host->mmc);
934 del_timer_sync(&host->detect_timer);
935 gpio_free(pin);
936 }
937
938 mmc_remove_host(host->mmc);
939
940 clk_enable(host->mck);
941 mci_writel(host, IDR, ~0UL);
942 mci_writel(host, CR, MCI_CR_MCIDIS);
943 mci_readl(host, SR);
944 clk_disable(host->mck);
945
946 if (host->wp_pin >= 0)
947 gpio_free(host->wp_pin);
948
949 free_irq(platform_get_irq(pdev, 0), host->mmc);
950 iounmap(host->regs);
951
952 clk_put(host->mck);
953
954 mmc_free_host(host->mmc);
955 }
956 return 0;
957}
958
959static struct platform_driver atmci_driver = {
960 .remove = __exit_p(atmci_remove),
961 .driver = {
962 .name = "atmel_mci",
963 },
964};
965
966static int __init atmci_init(void)
967{
968 return platform_driver_probe(&atmci_driver, atmci_probe);
969}
970
971static void __exit atmci_exit(void)
972{
973 platform_driver_unregister(&atmci_driver);
974}
975
976module_init(atmci_init);
977module_exit(atmci_exit);
978
979MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
980MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
981MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index cc5f7bc546af..3f15eb204895 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -21,7 +21,7 @@
21 * published by the Free Software Foundation. 21 * published by the Free Software Foundation.
22 */ 22 */
23 23
24/* Why is a timer used to detect insert events? 24/* Why don't we use the SD controllers' carddetect feature?
25 * 25 *
26 * From the AU1100 MMC application guide: 26 * From the AU1100 MMC application guide:
27 * If the Au1100-based design is intended to support both MultiMediaCards 27 * If the Au1100-based design is intended to support both MultiMediaCards
@@ -30,8 +30,6 @@
30 * In doing so, a MMC card never enters SPI-mode communications, 30 * In doing so, a MMC card never enters SPI-mode communications,
31 * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective 31 * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective
32 * (the low to high transition will not occur). 32 * (the low to high transition will not occur).
33 *
34 * So we use the timer to check the status manually.
35 */ 33 */
36 34
37#include <linux/module.h> 35#include <linux/module.h>
@@ -41,51 +39,110 @@
41#include <linux/interrupt.h> 39#include <linux/interrupt.h>
42#include <linux/dma-mapping.h> 40#include <linux/dma-mapping.h>
43#include <linux/scatterlist.h> 41#include <linux/scatterlist.h>
44 42#include <linux/leds.h>
45#include <linux/mmc/host.h> 43#include <linux/mmc/host.h>
44
46#include <asm/io.h> 45#include <asm/io.h>
47#include <asm/mach-au1x00/au1000.h> 46#include <asm/mach-au1x00/au1000.h>
48#include <asm/mach-au1x00/au1xxx_dbdma.h> 47#include <asm/mach-au1x00/au1xxx_dbdma.h>
49#include <asm/mach-au1x00/au1100_mmc.h> 48#include <asm/mach-au1x00/au1100_mmc.h>
50 49
51#include <au1xxx.h>
52#include "au1xmmc.h"
53
54#define DRIVER_NAME "au1xxx-mmc" 50#define DRIVER_NAME "au1xxx-mmc"
55 51
56/* Set this to enable special debugging macros */ 52/* Set this to enable special debugging macros */
53/* #define DEBUG */
57 54
58#ifdef DEBUG 55#ifdef DEBUG
59#define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args) 56#define DBG(fmt, idx, args...) \
57 printk(KERN_DEBUG "au1xmmc(%d): DEBUG: " fmt, idx, ##args)
60#else 58#else
61#define DBG(fmt, idx, args...) 59#define DBG(fmt, idx, args...) do {} while (0)
62#endif 60#endif
63 61
64const struct { 62/* Hardware definitions */
63#define AU1XMMC_DESCRIPTOR_COUNT 1
64#define AU1XMMC_DESCRIPTOR_SIZE 2048
65
66#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
67 MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
68 MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)
69
70/* This gives us a hard value for the stop command that we can write directly
71 * to the command register.
72 */
73#define STOP_CMD \
74 (SD_CMD_RT_1B | SD_CMD_CT_7 | (0xC << SD_CMD_CI_SHIFT) | SD_CMD_GO)
75
76/* This is the set of interrupts that we configure by default. */
77#define AU1XMMC_INTERRUPTS \
78 (SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_RAT | \
79 SD_CONFIG_CR | SD_CONFIG_I)
80
81/* The poll event (looking for insert/remove events runs twice a second. */
82#define AU1XMMC_DETECT_TIMEOUT (HZ/2)
83
84struct au1xmmc_host {
85 struct mmc_host *mmc;
86 struct mmc_request *mrq;
87
88 u32 flags;
65 u32 iobase; 89 u32 iobase;
66 u32 tx_devid, rx_devid; 90 u32 clock;
67 u16 bcsrpwr; 91 u32 bus_width;
68 u16 bcsrstatus; 92 u32 power_mode;
69 u16 wpstatus;
70} au1xmmc_card_table[] = {
71 { SD0_BASE, DSCR_CMD0_SDMS_TX0, DSCR_CMD0_SDMS_RX0,
72 BCSR_BOARD_SD0PWR, BCSR_INT_SD0INSERT, BCSR_STATUS_SD0WP },
73#ifndef CONFIG_MIPS_DB1200
74 { SD1_BASE, DSCR_CMD0_SDMS_TX1, DSCR_CMD0_SDMS_RX1,
75 BCSR_BOARD_DS1PWR, BCSR_INT_SD1INSERT, BCSR_STATUS_SD1WP }
76#endif
77};
78 93
79#define AU1XMMC_CONTROLLER_COUNT (ARRAY_SIZE(au1xmmc_card_table)) 94 int status;
80 95
81/* This array stores pointers for the hosts (used by the IRQ handler) */ 96 struct {
82struct au1xmmc_host *au1xmmc_hosts[AU1XMMC_CONTROLLER_COUNT]; 97 int len;
83static int dma = 1; 98 int dir;
99 } dma;
84 100
85#ifdef MODULE 101 struct {
86module_param(dma, bool, 0); 102 int index;
87MODULE_PARM_DESC(dma, "Use DMA engine for data transfers (0 = disabled)"); 103 int offset;
88#endif 104 int len;
105 } pio;
106
107 u32 tx_chan;
108 u32 rx_chan;
109
110 int irq;
111
112 struct tasklet_struct finish_task;
113 struct tasklet_struct data_task;
114 struct au1xmmc_platform_data *platdata;
115 struct platform_device *pdev;
116 struct resource *ioarea;
117};
118
119/* Status flags used by the host structure */
120#define HOST_F_XMIT 0x0001
121#define HOST_F_RECV 0x0002
122#define HOST_F_DMA 0x0010
123#define HOST_F_ACTIVE 0x0100
124#define HOST_F_STOP 0x1000
125
126#define HOST_S_IDLE 0x0001
127#define HOST_S_CMD 0x0002
128#define HOST_S_DATA 0x0003
129#define HOST_S_STOP 0x0004
130
131/* Easy access macros */
132#define HOST_STATUS(h) ((h)->iobase + SD_STATUS)
133#define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG)
134#define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE)
135#define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT)
136#define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT)
137#define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG)
138#define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE)
139#define HOST_CMD(h) ((h)->iobase + SD_CMD)
140#define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2)
141#define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT)
142#define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG)
143
144#define DMA_CHANNEL(h) \
145 (((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan)
89 146
90static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask) 147static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
91{ 148{
@@ -119,14 +176,13 @@ static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
119 176
120static inline void SEND_STOP(struct au1xmmc_host *host) 177static inline void SEND_STOP(struct au1xmmc_host *host)
121{ 178{
122 179 u32 config2;
123 /* We know the value of CONFIG2, so avoid a read we don't need */
124 u32 mask = SD_CONFIG2_EN;
125 180
126 WARN_ON(host->status != HOST_S_DATA); 181 WARN_ON(host->status != HOST_S_DATA);
127 host->status = HOST_S_STOP; 182 host->status = HOST_S_STOP;
128 183
129 au_writel(mask | SD_CONFIG2_DF, HOST_CONFIG2(host)); 184 config2 = au_readl(HOST_CONFIG2(host));
185 au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
130 au_sync(); 186 au_sync();
131 187
132 /* Send the stop commmand */ 188 /* Send the stop commmand */
@@ -135,35 +191,36 @@ static inline void SEND_STOP(struct au1xmmc_host *host)
135 191
136static void au1xmmc_set_power(struct au1xmmc_host *host, int state) 192static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
137{ 193{
138 194 if (host->platdata && host->platdata->set_power)
139 u32 val = au1xmmc_card_table[host->id].bcsrpwr; 195 host->platdata->set_power(host->mmc, state);
140
141 bcsr->board &= ~val;
142 if (state) bcsr->board |= val;
143
144 au_sync_delay(1);
145} 196}
146 197
147static inline int au1xmmc_card_inserted(struct au1xmmc_host *host) 198static int au1xmmc_card_inserted(struct mmc_host *mmc)
148{ 199{
149 return (bcsr->sig_status & au1xmmc_card_table[host->id].bcsrstatus) 200 struct au1xmmc_host *host = mmc_priv(mmc);
150 ? 1 : 0; 201
202 if (host->platdata && host->platdata->card_inserted)
203 return !!host->platdata->card_inserted(host->mmc);
204
205 return -ENOSYS;
151} 206}
152 207
153static int au1xmmc_card_readonly(struct mmc_host *mmc) 208static int au1xmmc_card_readonly(struct mmc_host *mmc)
154{ 209{
155 struct au1xmmc_host *host = mmc_priv(mmc); 210 struct au1xmmc_host *host = mmc_priv(mmc);
156 return (bcsr->status & au1xmmc_card_table[host->id].wpstatus) 211
157 ? 1 : 0; 212 if (host->platdata && host->platdata->card_readonly)
213 return !!host->platdata->card_readonly(mmc);
214
215 return -ENOSYS;
158} 216}
159 217
160static void au1xmmc_finish_request(struct au1xmmc_host *host) 218static void au1xmmc_finish_request(struct au1xmmc_host *host)
161{ 219{
162
163 struct mmc_request *mrq = host->mrq; 220 struct mmc_request *mrq = host->mrq;
164 221
165 host->mrq = NULL; 222 host->mrq = NULL;
166 host->flags &= HOST_F_ACTIVE; 223 host->flags &= HOST_F_ACTIVE | HOST_F_DMA;
167 224
168 host->dma.len = 0; 225 host->dma.len = 0;
169 host->dma.dir = 0; 226 host->dma.dir = 0;
@@ -174,8 +231,6 @@ static void au1xmmc_finish_request(struct au1xmmc_host *host)
174 231
175 host->status = HOST_S_IDLE; 232 host->status = HOST_S_IDLE;
176 233
177 bcsr->disk_leds |= (1 << 8);
178
179 mmc_request_done(host->mmc, mrq); 234 mmc_request_done(host->mmc, mrq);
180} 235}
181 236
@@ -235,18 +290,14 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
235 au_sync(); 290 au_sync();
236 291
237 /* Wait for the command to go on the line */ 292 /* Wait for the command to go on the line */
238 293 while (au_readl(HOST_CMD(host)) & SD_CMD_GO)
239 while(1) { 294 /* nop */;
240 if (!(au_readl(HOST_CMD(host)) & SD_CMD_GO))
241 break;
242 }
243 295
244 /* Wait for the command to come back */ 296 /* Wait for the command to come back */
245
246 if (wait) { 297 if (wait) {
247 u32 status = au_readl(HOST_STATUS(host)); 298 u32 status = au_readl(HOST_STATUS(host));
248 299
249 while(!(status & SD_STATUS_CR)) 300 while (!(status & SD_STATUS_CR))
250 status = au_readl(HOST_STATUS(host)); 301 status = au_readl(HOST_STATUS(host));
251 302
252 /* Clear the CR status */ 303 /* Clear the CR status */
@@ -260,12 +311,11 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
260 311
261static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) 312static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
262{ 313{
263
264 struct mmc_request *mrq = host->mrq; 314 struct mmc_request *mrq = host->mrq;
265 struct mmc_data *data; 315 struct mmc_data *data;
266 u32 crc; 316 u32 crc;
267 317
268 WARN_ON(host->status != HOST_S_DATA && host->status != HOST_S_STOP); 318 WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP));
269 319
270 if (host->mrq == NULL) 320 if (host->mrq == NULL)
271 return; 321 return;
@@ -276,15 +326,13 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
276 status = au_readl(HOST_STATUS(host)); 326 status = au_readl(HOST_STATUS(host));
277 327
278 /* The transaction is really over when the SD_STATUS_DB bit is clear */ 328 /* The transaction is really over when the SD_STATUS_DB bit is clear */
279 329 while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
280 while((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
281 status = au_readl(HOST_STATUS(host)); 330 status = au_readl(HOST_STATUS(host));
282 331
283 data->error = 0; 332 data->error = 0;
284 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir); 333 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);
285 334
286 /* Process any errors */ 335 /* Process any errors */
287
288 crc = (status & (SD_STATUS_WC | SD_STATUS_RC)); 336 crc = (status & (SD_STATUS_WC | SD_STATUS_RC));
289 if (host->flags & HOST_F_XMIT) 337 if (host->flags & HOST_F_XMIT)
290 crc |= ((status & 0x07) == 0x02) ? 0 : 1; 338 crc |= ((status & 0x07) == 0x02) ? 0 : 1;
@@ -299,16 +347,16 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
299 347
300 if (!data->error) { 348 if (!data->error) {
301 if (host->flags & HOST_F_DMA) { 349 if (host->flags & HOST_F_DMA) {
350#ifdef CONFIG_SOC_AU1200 /* DBDMA */
302 u32 chan = DMA_CHANNEL(host); 351 u32 chan = DMA_CHANNEL(host);
303 352
304 chan_tab_t *c = *((chan_tab_t **) chan); 353 chan_tab_t *c = *((chan_tab_t **)chan);
305 au1x_dma_chan_t *cp = c->chan_ptr; 354 au1x_dma_chan_t *cp = c->chan_ptr;
306 data->bytes_xfered = cp->ddma_bytecnt; 355 data->bytes_xfered = cp->ddma_bytecnt;
307 } 356#endif
308 else 357 } else
309 data->bytes_xfered = 358 data->bytes_xfered =
310 (data->blocks * data->blksz) - 359 (data->blocks * data->blksz) - host->pio.len;
311 host->pio.len;
312 } 360 }
313 361
314 au1xmmc_finish_request(host); 362 au1xmmc_finish_request(host);
@@ -316,7 +364,7 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
316 364
317static void au1xmmc_tasklet_data(unsigned long param) 365static void au1xmmc_tasklet_data(unsigned long param)
318{ 366{
319 struct au1xmmc_host *host = (struct au1xmmc_host *) param; 367 struct au1xmmc_host *host = (struct au1xmmc_host *)param;
320 368
321 u32 status = au_readl(HOST_STATUS(host)); 369 u32 status = au_readl(HOST_STATUS(host));
322 au1xmmc_data_complete(host, status); 370 au1xmmc_data_complete(host, status);
@@ -326,11 +374,10 @@ static void au1xmmc_tasklet_data(unsigned long param)
326 374
327static void au1xmmc_send_pio(struct au1xmmc_host *host) 375static void au1xmmc_send_pio(struct au1xmmc_host *host)
328{ 376{
329 377 struct mmc_data *data;
330 struct mmc_data *data = 0; 378 int sg_len, max, count;
331 int sg_len, max, count = 0; 379 unsigned char *sg_ptr, val;
332 unsigned char *sg_ptr; 380 u32 status;
333 u32 status = 0;
334 struct scatterlist *sg; 381 struct scatterlist *sg;
335 382
336 data = host->mrq->data; 383 data = host->mrq->data;
@@ -345,14 +392,12 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host)
345 /* This is the space left inside the buffer */ 392 /* This is the space left inside the buffer */
346 sg_len = data->sg[host->pio.index].length - host->pio.offset; 393 sg_len = data->sg[host->pio.index].length - host->pio.offset;
347 394
348 /* Check to if we need less then the size of the sg_buffer */ 395 /* Check if we need less than the size of the sg_buffer */
349
350 max = (sg_len > host->pio.len) ? host->pio.len : sg_len; 396 max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
351 if (max > AU1XMMC_MAX_TRANSFER) max = AU1XMMC_MAX_TRANSFER; 397 if (max > AU1XMMC_MAX_TRANSFER)
352 398 max = AU1XMMC_MAX_TRANSFER;
353 for(count = 0; count < max; count++ ) {
354 unsigned char val;
355 399
400 for (count = 0; count < max; count++) {
356 status = au_readl(HOST_STATUS(host)); 401 status = au_readl(HOST_STATUS(host));
357 402
358 if (!(status & SD_STATUS_TH)) 403 if (!(status & SD_STATUS_TH))
@@ -360,7 +405,7 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host)
360 405
361 val = *sg_ptr++; 406 val = *sg_ptr++;
362 407
363 au_writel((unsigned long) val, HOST_TXPORT(host)); 408 au_writel((unsigned long)val, HOST_TXPORT(host));
364 au_sync(); 409 au_sync();
365 } 410 }
366 411
@@ -384,11 +429,10 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host)
384 429
385static void au1xmmc_receive_pio(struct au1xmmc_host *host) 430static void au1xmmc_receive_pio(struct au1xmmc_host *host)
386{ 431{
387 432 struct mmc_data *data;
388 struct mmc_data *data = 0; 433 int max, count, sg_len = 0;
389 int sg_len = 0, max = 0, count = 0; 434 unsigned char *sg_ptr = NULL;
390 unsigned char *sg_ptr = 0; 435 u32 status, val;
391 u32 status = 0;
392 struct scatterlist *sg; 436 struct scatterlist *sg;
393 437
394 data = host->mrq->data; 438 data = host->mrq->data;
@@ -405,33 +449,33 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
405 /* This is the space left inside the buffer */ 449 /* This is the space left inside the buffer */
406 sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset; 450 sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;
407 451
408 /* Check to if we need less then the size of the sg_buffer */ 452 /* Check if we need less than the size of the sg_buffer */
409 if (sg_len < max) max = sg_len; 453 if (sg_len < max)
454 max = sg_len;
410 } 455 }
411 456
412 if (max > AU1XMMC_MAX_TRANSFER) 457 if (max > AU1XMMC_MAX_TRANSFER)
413 max = AU1XMMC_MAX_TRANSFER; 458 max = AU1XMMC_MAX_TRANSFER;
414 459
415 for(count = 0; count < max; count++ ) { 460 for (count = 0; count < max; count++) {
416 u32 val;
417 status = au_readl(HOST_STATUS(host)); 461 status = au_readl(HOST_STATUS(host));
418 462
419 if (!(status & SD_STATUS_NE)) 463 if (!(status & SD_STATUS_NE))
420 break; 464 break;
421 465
422 if (status & SD_STATUS_RC) { 466 if (status & SD_STATUS_RC) {
423 DBG("RX CRC Error [%d + %d].\n", host->id, 467 DBG("RX CRC Error [%d + %d].\n", host->pdev->id,
424 host->pio.len, count); 468 host->pio.len, count);
425 break; 469 break;
426 } 470 }
427 471
428 if (status & SD_STATUS_RO) { 472 if (status & SD_STATUS_RO) {
429 DBG("RX Overrun [%d + %d]\n", host->id, 473 DBG("RX Overrun [%d + %d]\n", host->pdev->id,
430 host->pio.len, count); 474 host->pio.len, count);
431 break; 475 break;
432 } 476 }
433 else if (status & SD_STATUS_RU) { 477 else if (status & SD_STATUS_RU) {
434 DBG("RX Underrun [%d + %d]\n", host->id, 478 DBG("RX Underrun [%d + %d]\n", host->pdev->id,
435 host->pio.len, count); 479 host->pio.len, count);
436 break; 480 break;
437 } 481 }
@@ -439,7 +483,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
439 val = au_readl(HOST_RXPORT(host)); 483 val = au_readl(HOST_RXPORT(host));
440 484
441 if (sg_ptr) 485 if (sg_ptr)
442 *sg_ptr++ = (unsigned char) (val & 0xFF); 486 *sg_ptr++ = (unsigned char)(val & 0xFF);
443 } 487 }
444 488
445 host->pio.len -= count; 489 host->pio.len -= count;
@@ -451,7 +495,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
451 } 495 }
452 496
453 if (host->pio.len == 0) { 497 if (host->pio.len == 0) {
454 //IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); 498 /* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */
455 IRQ_OFF(host, SD_CONFIG_NE); 499 IRQ_OFF(host, SD_CONFIG_NE);
456 500
457 if (host->flags & HOST_F_STOP) 501 if (host->flags & HOST_F_STOP)
@@ -461,17 +505,15 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
461 } 505 }
462} 506}
463 507
464/* static void au1xmmc_cmd_complete 508/* This is called when a command has been completed - grab the response
465 This is called when a command has been completed - grab the response 509 * and check for errors. Then start the data transfer if it is indicated.
466 and check for errors. Then start the data transfer if it is indicated. 510 */
467*/
468
469static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) 511static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
470{ 512{
471
472 struct mmc_request *mrq = host->mrq; 513 struct mmc_request *mrq = host->mrq;
473 struct mmc_command *cmd; 514 struct mmc_command *cmd;
474 int trans; 515 u32 r[4];
516 int i, trans;
475 517
476 if (!host->mrq) 518 if (!host->mrq)
477 return; 519 return;
@@ -481,9 +523,6 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
481 523
482 if (cmd->flags & MMC_RSP_PRESENT) { 524 if (cmd->flags & MMC_RSP_PRESENT) {
483 if (cmd->flags & MMC_RSP_136) { 525 if (cmd->flags & MMC_RSP_136) {
484 u32 r[4];
485 int i;
486
487 r[0] = au_readl(host->iobase + SD_RESP3); 526 r[0] = au_readl(host->iobase + SD_RESP3);
488 r[1] = au_readl(host->iobase + SD_RESP2); 527 r[1] = au_readl(host->iobase + SD_RESP2);
489 r[2] = au_readl(host->iobase + SD_RESP1); 528 r[2] = au_readl(host->iobase + SD_RESP1);
@@ -491,10 +530,9 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
491 530
492 /* The CRC is omitted from the response, so really 531 /* The CRC is omitted from the response, so really
493 * we only got 120 bytes, but the engine expects 532 * we only got 120 bytes, but the engine expects
494 * 128 bits, so we have to shift things up 533 * 128 bits, so we have to shift things up.
495 */ 534 */
496 535 for (i = 0; i < 4; i++) {
497 for(i = 0; i < 4; i++) {
498 cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8; 536 cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
499 if (i != 3) 537 if (i != 3)
500 cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24; 538 cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
@@ -505,22 +543,20 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
505 * our response omits the CRC, our data ends up 543 * our response omits the CRC, our data ends up
506 * being shifted 8 bits to the right. In this case, 544 * being shifted 8 bits to the right. In this case,
507 * that means that the OSR data starts at bit 31, 545 * that means that the OSR data starts at bit 31,
508 * so we can just read RESP0 and return that 546 * so we can just read RESP0 and return that.
509 */ 547 */
510 cmd->resp[0] = au_readl(host->iobase + SD_RESP0); 548 cmd->resp[0] = au_readl(host->iobase + SD_RESP0);
511 } 549 }
512 } 550 }
513 551
514 /* Figure out errors */ 552 /* Figure out errors */
515
516 if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC)) 553 if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
517 cmd->error = -EILSEQ; 554 cmd->error = -EILSEQ;
518 555
519 trans = host->flags & (HOST_F_XMIT | HOST_F_RECV); 556 trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);
520 557
521 if (!trans || cmd->error) { 558 if (!trans || cmd->error) {
522 559 IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
523 IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA|SD_CONFIG_RF);
524 tasklet_schedule(&host->finish_task); 560 tasklet_schedule(&host->finish_task);
525 return; 561 return;
526 } 562 }
@@ -528,6 +564,7 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
528 host->status = HOST_S_DATA; 564 host->status = HOST_S_DATA;
529 565
530 if (host->flags & HOST_F_DMA) { 566 if (host->flags & HOST_F_DMA) {
567#ifdef CONFIG_SOC_AU1200 /* DBDMA */
531 u32 channel = DMA_CHANNEL(host); 568 u32 channel = DMA_CHANNEL(host);
532 569
533 /* Start the DMA as soon as the buffer gets something in it */ 570 /* Start the DMA as soon as the buffer gets something in it */
@@ -540,23 +577,21 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
540 } 577 }
541 578
542 au1xxx_dbdma_start(channel); 579 au1xxx_dbdma_start(channel);
580#endif
543 } 581 }
544} 582}
545 583
546static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate) 584static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
547{ 585{
548
549 unsigned int pbus = get_au1x00_speed(); 586 unsigned int pbus = get_au1x00_speed();
550 unsigned int divisor; 587 unsigned int divisor;
551 u32 config; 588 u32 config;
552 589
553 /* From databook: 590 /* From databook:
554 divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1 591 * divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1
555 */ 592 */
556
557 pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2); 593 pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2);
558 pbus /= 2; 594 pbus /= 2;
559
560 divisor = ((pbus / rate) / 2) - 1; 595 divisor = ((pbus / rate) / 2) - 1;
561 596
562 config = au_readl(HOST_CONFIG(host)); 597 config = au_readl(HOST_CONFIG(host));
@@ -568,15 +603,11 @@ static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
568 au_sync(); 603 au_sync();
569} 604}
570 605
571static int 606static int au1xmmc_prepare_data(struct au1xmmc_host *host,
572au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) 607 struct mmc_data *data)
573{ 608{
574
575 int datalen = data->blocks * data->blksz; 609 int datalen = data->blocks * data->blksz;
576 610
577 if (dma != 0)
578 host->flags |= HOST_F_DMA;
579
580 if (data->flags & MMC_DATA_READ) 611 if (data->flags & MMC_DATA_READ)
581 host->flags |= HOST_F_RECV; 612 host->flags |= HOST_F_RECV;
582 else 613 else
@@ -596,12 +627,13 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
596 au_writel(data->blksz - 1, HOST_BLKSIZE(host)); 627 au_writel(data->blksz - 1, HOST_BLKSIZE(host));
597 628
598 if (host->flags & HOST_F_DMA) { 629 if (host->flags & HOST_F_DMA) {
630#ifdef CONFIG_SOC_AU1200 /* DBDMA */
599 int i; 631 int i;
600 u32 channel = DMA_CHANNEL(host); 632 u32 channel = DMA_CHANNEL(host);
601 633
602 au1xxx_dbdma_stop(channel); 634 au1xxx_dbdma_stop(channel);
603 635
604 for(i = 0; i < host->dma.len; i++) { 636 for (i = 0; i < host->dma.len; i++) {
605 u32 ret = 0, flags = DDMA_FLAGS_NOIE; 637 u32 ret = 0, flags = DDMA_FLAGS_NOIE;
606 struct scatterlist *sg = &data->sg[i]; 638 struct scatterlist *sg = &data->sg[i];
607 int sg_len = sg->length; 639 int sg_len = sg->length;
@@ -611,23 +643,21 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
611 if (i == host->dma.len - 1) 643 if (i == host->dma.len - 1)
612 flags = DDMA_FLAGS_IE; 644 flags = DDMA_FLAGS_IE;
613 645
614 if (host->flags & HOST_F_XMIT){ 646 if (host->flags & HOST_F_XMIT) {
615 ret = au1xxx_dbdma_put_source_flags(channel, 647 ret = au1xxx_dbdma_put_source_flags(channel,
616 (void *) sg_virt(sg), len, flags); 648 (void *)sg_virt(sg), len, flags);
617 } 649 } else {
618 else { 650 ret = au1xxx_dbdma_put_dest_flags(channel,
619 ret = au1xxx_dbdma_put_dest_flags(channel, 651 (void *)sg_virt(sg), len, flags);
620 (void *) sg_virt(sg),
621 len, flags);
622 } 652 }
623 653
624 if (!ret) 654 if (!ret)
625 goto dataerr; 655 goto dataerr;
626 656
627 datalen -= len; 657 datalen -= len;
628 } 658 }
629 } 659#endif
630 else { 660 } else {
631 host->pio.index = 0; 661 host->pio.index = 0;
632 host->pio.offset = 0; 662 host->pio.offset = 0;
633 host->pio.len = datalen; 663 host->pio.len = datalen;
@@ -636,25 +666,21 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
636 IRQ_ON(host, SD_CONFIG_TH); 666 IRQ_ON(host, SD_CONFIG_TH);
637 else 667 else
638 IRQ_ON(host, SD_CONFIG_NE); 668 IRQ_ON(host, SD_CONFIG_NE);
639 //IRQ_ON(host, SD_CONFIG_RA|SD_CONFIG_RF); 669 /* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */
640 } 670 }
641 671
642 return 0; 672 return 0;
643 673
644 dataerr: 674dataerr:
645 dma_unmap_sg(mmc_dev(host->mmc),data->sg,data->sg_len,host->dma.dir); 675 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
676 host->dma.dir);
646 return -ETIMEDOUT; 677 return -ETIMEDOUT;
647} 678}
648 679
649/* static void au1xmmc_request 680/* This actually starts a command or data transaction */
650 This actually starts a command or data transaction
651*/
652
653static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq) 681static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
654{ 682{
655
656 struct au1xmmc_host *host = mmc_priv(mmc); 683 struct au1xmmc_host *host = mmc_priv(mmc);
657 unsigned int flags = 0;
658 int ret = 0; 684 int ret = 0;
659 685
660 WARN_ON(irqs_disabled()); 686 WARN_ON(irqs_disabled());
@@ -663,11 +689,15 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
663 host->mrq = mrq; 689 host->mrq = mrq;
664 host->status = HOST_S_CMD; 690 host->status = HOST_S_CMD;
665 691
666 bcsr->disk_leds &= ~(1 << 8); 692 /* fail request immediately if no card is present */
693 if (0 == au1xmmc_card_inserted(mmc)) {
694 mrq->cmd->error = -ENOMEDIUM;
695 au1xmmc_finish_request(host);
696 return;
697 }
667 698
668 if (mrq->data) { 699 if (mrq->data) {
669 FLUSH_FIFO(host); 700 FLUSH_FIFO(host);
670 flags = mrq->data->flags;
671 ret = au1xmmc_prepare_data(host, mrq->data); 701 ret = au1xmmc_prepare_data(host, mrq->data);
672 } 702 }
673 703
@@ -682,7 +712,6 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
682 712
683static void au1xmmc_reset_controller(struct au1xmmc_host *host) 713static void au1xmmc_reset_controller(struct au1xmmc_host *host)
684{ 714{
685
686 /* Apply the clock */ 715 /* Apply the clock */
687 au_writel(SD_ENABLE_CE, HOST_ENABLE(host)); 716 au_writel(SD_ENABLE_CE, HOST_ENABLE(host));
688 au_sync_delay(1); 717 au_sync_delay(1);
@@ -712,9 +741,10 @@ static void au1xmmc_reset_controller(struct au1xmmc_host *host)
712} 741}
713 742
714 743
715static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios) 744static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
716{ 745{
717 struct au1xmmc_host *host = mmc_priv(mmc); 746 struct au1xmmc_host *host = mmc_priv(mmc);
747 u32 config2;
718 748
719 if (ios->power_mode == MMC_POWER_OFF) 749 if (ios->power_mode == MMC_POWER_OFF)
720 au1xmmc_set_power(host, 0); 750 au1xmmc_set_power(host, 0);
@@ -726,21 +756,18 @@ static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
726 au1xmmc_set_clock(host, ios->clock); 756 au1xmmc_set_clock(host, ios->clock);
727 host->clock = ios->clock; 757 host->clock = ios->clock;
728 } 758 }
729}
730
731static void au1xmmc_dma_callback(int irq, void *dev_id)
732{
733 struct au1xmmc_host *host = (struct au1xmmc_host *) dev_id;
734
735 /* Avoid spurious interrupts */
736 759
737 if (!host->mrq) 760 config2 = au_readl(HOST_CONFIG2(host));
738 return; 761 switch (ios->bus_width) {
739 762 case MMC_BUS_WIDTH_4:
740 if (host->flags & HOST_F_STOP) 763 config2 |= SD_CONFIG2_WB;
741 SEND_STOP(host); 764 break;
742 765 case MMC_BUS_WIDTH_1:
743 tasklet_schedule(&host->data_task); 766 config2 &= ~SD_CONFIG2_WB;
767 break;
768 }
769 au_writel(config2, HOST_CONFIG2(host));
770 au_sync();
744} 771}
745 772
746#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT) 773#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
@@ -749,245 +776,354 @@ static void au1xmmc_dma_callback(int irq, void *dev_id)
749 776
750static irqreturn_t au1xmmc_irq(int irq, void *dev_id) 777static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
751{ 778{
752 779 struct au1xmmc_host *host = dev_id;
753 u32 status; 780 u32 status;
754 int i, ret = 0;
755
756 disable_irq(AU1100_SD_IRQ);
757 781
758 for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { 782 status = au_readl(HOST_STATUS(host));
759 struct au1xmmc_host * host = au1xmmc_hosts[i];
760 u32 handled = 1;
761 783
762 status = au_readl(HOST_STATUS(host)); 784 if (!(status & SD_STATUS_I))
785 return IRQ_NONE; /* not ours */
763 786
764 if (host->mrq && (status & STATUS_TIMEOUT)) { 787 if (status & SD_STATUS_SI) /* SDIO */
765 if (status & SD_STATUS_RAT) 788 mmc_signal_sdio_irq(host->mmc);
766 host->mrq->cmd->error = -ETIMEDOUT;
767 789
768 else if (status & SD_STATUS_DT) 790 if (host->mrq && (status & STATUS_TIMEOUT)) {
769 host->mrq->data->error = -ETIMEDOUT; 791 if (status & SD_STATUS_RAT)
792 host->mrq->cmd->error = -ETIMEDOUT;
793 else if (status & SD_STATUS_DT)
794 host->mrq->data->error = -ETIMEDOUT;
770 795
771 /* In PIO mode, interrupts might still be enabled */ 796 /* In PIO mode, interrupts might still be enabled */
772 IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH); 797 IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);
773 798
774 //IRQ_OFF(host, SD_CONFIG_TH|SD_CONFIG_RA|SD_CONFIG_RF); 799 /* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */
775 tasklet_schedule(&host->finish_task); 800 tasklet_schedule(&host->finish_task);
776 } 801 }
777#if 0 802#if 0
778 else if (status & SD_STATUS_DD) { 803 else if (status & SD_STATUS_DD) {
779 804 /* Sometimes we get a DD before a NE in PIO mode */
780 /* Sometimes we get a DD before a NE in PIO mode */ 805 if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE))
781 806 au1xmmc_receive_pio(host);
782 if (!(host->flags & HOST_F_DMA) && 807 else {
783 (status & SD_STATUS_NE)) 808 au1xmmc_data_complete(host, status);
784 au1xmmc_receive_pio(host); 809 /* tasklet_schedule(&host->data_task); */
785 else {
786 au1xmmc_data_complete(host, status);
787 //tasklet_schedule(&host->data_task);
788 }
789 } 810 }
811 }
790#endif 812#endif
791 else if (status & (SD_STATUS_CR)) { 813 else if (status & SD_STATUS_CR) {
792 if (host->status == HOST_S_CMD) 814 if (host->status == HOST_S_CMD)
793 au1xmmc_cmd_complete(host,status); 815 au1xmmc_cmd_complete(host, status);
794 } 816
795 else if (!(host->flags & HOST_F_DMA)) { 817 } else if (!(host->flags & HOST_F_DMA)) {
796 if ((host->flags & HOST_F_XMIT) && 818 if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT))
797 (status & STATUS_DATA_OUT)) 819 au1xmmc_send_pio(host);
798 au1xmmc_send_pio(host); 820 else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN))
799 else if ((host->flags & HOST_F_RECV) && 821 au1xmmc_receive_pio(host);
800 (status & STATUS_DATA_IN)) 822
801 au1xmmc_receive_pio(host); 823 } else if (status & 0x203F3C70) {
802 } 824 DBG("Unhandled status %8.8x\n", host->pdev->id,
803 else if (status & 0x203FBC70) { 825 status);
804 DBG("Unhandled status %8.8x\n", host->id, status);
805 handled = 0;
806 }
807
808 au_writel(status, HOST_STATUS(host));
809 au_sync();
810
811 ret |= handled;
812 } 826 }
813 827
814 enable_irq(AU1100_SD_IRQ); 828 au_writel(status, HOST_STATUS(host));
815 return ret; 829 au_sync();
830
831 return IRQ_HANDLED;
816} 832}
817 833
818static void au1xmmc_poll_event(unsigned long arg) 834#ifdef CONFIG_SOC_AU1200
819{ 835/* 8bit memory DMA device */
820 struct au1xmmc_host *host = (struct au1xmmc_host *) arg; 836static dbdev_tab_t au1xmmc_mem_dbdev = {
837 .dev_id = DSCR_CMD0_ALWAYS,
838 .dev_flags = DEV_FLAGS_ANYUSE,
839 .dev_tsize = 0,
840 .dev_devwidth = 8,
841 .dev_physaddr = 0x00000000,
842 .dev_intlevel = 0,
843 .dev_intpolarity = 0,
844};
845static int memid;
821 846
822 int card = au1xmmc_card_inserted(host); 847static void au1xmmc_dbdma_callback(int irq, void *dev_id)
823 int controller = (host->flags & HOST_F_ACTIVE) ? 1 : 0; 848{
849 struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id;
824 850
825 if (card != controller) { 851 /* Avoid spurious interrupts */
826 host->flags &= ~HOST_F_ACTIVE; 852 if (!host->mrq)
827 if (card) host->flags |= HOST_F_ACTIVE; 853 return;
828 mmc_detect_change(host->mmc, 0);
829 }
830 854
831 if (host->mrq != NULL) { 855 if (host->flags & HOST_F_STOP)
832 u32 status = au_readl(HOST_STATUS(host)); 856 SEND_STOP(host);
833 DBG("PENDING - %8.8x\n", host->id, status);
834 }
835 857
836 mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT); 858 tasklet_schedule(&host->data_task);
837} 859}
838 860
839static dbdev_tab_t au1xmmc_mem_dbdev = 861static int au1xmmc_dbdma_init(struct au1xmmc_host *host)
840{
841 DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 8, 0x00000000, 0, 0
842};
843
844static void au1xmmc_init_dma(struct au1xmmc_host *host)
845{ 862{
863 struct resource *res;
864 int txid, rxid;
865
866 res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0);
867 if (!res)
868 return -ENODEV;
869 txid = res->start;
870
871 res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1);
872 if (!res)
873 return -ENODEV;
874 rxid = res->start;
875
876 if (!memid)
877 return -ENODEV;
878
879 host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid,
880 au1xmmc_dbdma_callback, (void *)host);
881 if (!host->tx_chan) {
882 dev_err(&host->pdev->dev, "cannot allocate TX DMA\n");
883 return -ENODEV;
884 }
846 885
847 u32 rxchan, txchan; 886 host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid,
848 887 au1xmmc_dbdma_callback, (void *)host);
849 int txid = au1xmmc_card_table[host->id].tx_devid; 888 if (!host->rx_chan) {
850 int rxid = au1xmmc_card_table[host->id].rx_devid; 889 dev_err(&host->pdev->dev, "cannot allocate RX DMA\n");
890 au1xxx_dbdma_chan_free(host->tx_chan);
891 return -ENODEV;
892 }
851 893
852 /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride 894 au1xxx_dbdma_set_devwidth(host->tx_chan, 8);
853 of 8 bits. And since devices are shared, we need to create 895 au1xxx_dbdma_set_devwidth(host->rx_chan, 8);
854 our own to avoid freaking out other devices
855 */
856 896
857 int memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); 897 au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT);
898 au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT);
858 899
859 txchan = au1xxx_dbdma_chan_alloc(memid, txid, 900 /* DBDMA is good to go */
860 au1xmmc_dma_callback, (void *) host); 901 host->flags |= HOST_F_DMA;
861 902
862 rxchan = au1xxx_dbdma_chan_alloc(rxid, memid, 903 return 0;
863 au1xmmc_dma_callback, (void *) host); 904}
864 905
865 au1xxx_dbdma_set_devwidth(txchan, 8); 906static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host)
866 au1xxx_dbdma_set_devwidth(rxchan, 8); 907{
908 if (host->flags & HOST_F_DMA) {
909 host->flags &= ~HOST_F_DMA;
910 au1xxx_dbdma_chan_free(host->tx_chan);
911 au1xxx_dbdma_chan_free(host->rx_chan);
912 }
913}
914#endif
867 915
868 au1xxx_dbdma_ring_alloc(txchan, AU1XMMC_DESCRIPTOR_COUNT); 916static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en)
869 au1xxx_dbdma_ring_alloc(rxchan, AU1XMMC_DESCRIPTOR_COUNT); 917{
918 struct au1xmmc_host *host = mmc_priv(mmc);
870 919
871 host->tx_chan = txchan; 920 if (en)
872 host->rx_chan = rxchan; 921 IRQ_ON(host, SD_CONFIG_SI);
922 else
923 IRQ_OFF(host, SD_CONFIG_SI);
873} 924}
874 925
875static const struct mmc_host_ops au1xmmc_ops = { 926static const struct mmc_host_ops au1xmmc_ops = {
876 .request = au1xmmc_request, 927 .request = au1xmmc_request,
877 .set_ios = au1xmmc_set_ios, 928 .set_ios = au1xmmc_set_ios,
878 .get_ro = au1xmmc_card_readonly, 929 .get_ro = au1xmmc_card_readonly,
930 .get_cd = au1xmmc_card_inserted,
931 .enable_sdio_irq = au1xmmc_enable_sdio_irq,
879}; 932};
880 933
881static int __devinit au1xmmc_probe(struct platform_device *pdev) 934static int __devinit au1xmmc_probe(struct platform_device *pdev)
882{ 935{
936 struct mmc_host *mmc;
937 struct au1xmmc_host *host;
938 struct resource *r;
939 int ret;
940
941 mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
942 if (!mmc) {
943 dev_err(&pdev->dev, "no memory for mmc_host\n");
944 ret = -ENOMEM;
945 goto out0;
946 }
883 947
884 int i, ret = 0; 948 host = mmc_priv(mmc);
885 949 host->mmc = mmc;
886 /* THe interrupt is shared among all controllers */ 950 host->platdata = pdev->dev.platform_data;
887 ret = request_irq(AU1100_SD_IRQ, au1xmmc_irq, IRQF_DISABLED, "MMC", 0); 951 host->pdev = pdev;
888 952
889 if (ret) { 953 ret = -ENODEV;
890 printk(DRIVER_NAME "ERROR: Couldn't get int %d: %d\n", 954 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
891 AU1100_SD_IRQ, ret); 955 if (!r) {
892 return -ENXIO; 956 dev_err(&pdev->dev, "no mmio defined\n");
957 goto out1;
893 } 958 }
894 959
895 disable_irq(AU1100_SD_IRQ); 960 host->ioarea = request_mem_region(r->start, r->end - r->start + 1,
961 pdev->name);
962 if (!host->ioarea) {
963 dev_err(&pdev->dev, "mmio already in use\n");
964 goto out1;
965 }
896 966
897 for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { 967 host->iobase = (unsigned long)ioremap(r->start, 0x3c);
898 struct mmc_host *mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev); 968 if (!host->iobase) {
899 struct au1xmmc_host *host = 0; 969 dev_err(&pdev->dev, "cannot remap mmio\n");
970 goto out2;
971 }
900 972
901 if (!mmc) { 973 r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
902 printk(DRIVER_NAME "ERROR: no mem for host %d\n", i); 974 if (!r) {
903 au1xmmc_hosts[i] = 0; 975 dev_err(&pdev->dev, "no IRQ defined\n");
904 continue; 976 goto out3;
905 } 977 }
906 978
907 mmc->ops = &au1xmmc_ops; 979 host->irq = r->start;
980 /* IRQ is shared among both SD controllers */
981 ret = request_irq(host->irq, au1xmmc_irq, IRQF_SHARED,
982 DRIVER_NAME, host);
983 if (ret) {
984 dev_err(&pdev->dev, "cannot grab IRQ\n");
985 goto out3;
986 }
908 987
909 mmc->f_min = 450000; 988 mmc->ops = &au1xmmc_ops;
910 mmc->f_max = 24000000;
911 989
912 mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; 990 mmc->f_min = 450000;
913 mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT; 991 mmc->f_max = 24000000;
914 992
915 mmc->max_blk_size = 2048; 993 mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
916 mmc->max_blk_count = 512; 994 mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;
917 995
918 mmc->ocr_avail = AU1XMMC_OCR; 996 mmc->max_blk_size = 2048;
997 mmc->max_blk_count = 512;
919 998
920 host = mmc_priv(mmc); 999 mmc->ocr_avail = AU1XMMC_OCR;
921 host->mmc = mmc; 1000 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
922 1001
923 host->id = i; 1002 host->status = HOST_S_IDLE;
924 host->iobase = au1xmmc_card_table[host->id].iobase;
925 host->clock = 0;
926 host->power_mode = MMC_POWER_OFF;
927 1003
928 host->flags = au1xmmc_card_inserted(host) ? HOST_F_ACTIVE : 0; 1004 /* board-specific carddetect setup, if any */
929 host->status = HOST_S_IDLE; 1005 if (host->platdata && host->platdata->cd_setup) {
1006 ret = host->platdata->cd_setup(mmc, 1);
1007 if (ret) {
1008 dev_warn(&pdev->dev, "board CD setup failed\n");
1009 mmc->caps |= MMC_CAP_NEEDS_POLL;
1010 }
1011 } else
1012 mmc->caps |= MMC_CAP_NEEDS_POLL;
930 1013
931 init_timer(&host->timer); 1014 tasklet_init(&host->data_task, au1xmmc_tasklet_data,
1015 (unsigned long)host);
932 1016
933 host->timer.function = au1xmmc_poll_event; 1017 tasklet_init(&host->finish_task, au1xmmc_tasklet_finish,
934 host->timer.data = (unsigned long) host; 1018 (unsigned long)host);
935 host->timer.expires = jiffies + AU1XMMC_DETECT_TIMEOUT;
936 1019
937 tasklet_init(&host->data_task, au1xmmc_tasklet_data, 1020#ifdef CONFIG_SOC_AU1200
938 (unsigned long) host); 1021 ret = au1xmmc_dbdma_init(host);
1022 if (ret)
1023 printk(KERN_INFO DRIVER_NAME ": DBDMA init failed; using PIO\n");
1024#endif
939 1025
940 tasklet_init(&host->finish_task, au1xmmc_tasklet_finish, 1026#ifdef CONFIG_LEDS_CLASS
941 (unsigned long) host); 1027 if (host->platdata && host->platdata->led) {
1028 struct led_classdev *led = host->platdata->led;
1029 led->name = mmc_hostname(mmc);
1030 led->brightness = LED_OFF;
1031 led->default_trigger = mmc_hostname(mmc);
1032 ret = led_classdev_register(mmc_dev(mmc), led);
1033 if (ret)
1034 goto out5;
1035 }
1036#endif
942 1037
943 spin_lock_init(&host->lock); 1038 au1xmmc_reset_controller(host);
944 1039
945 if (dma != 0) 1040 ret = mmc_add_host(mmc);
946 au1xmmc_init_dma(host); 1041 if (ret) {
1042 dev_err(&pdev->dev, "cannot add mmc host\n");
1043 goto out6;
1044 }
947 1045
948 au1xmmc_reset_controller(host); 1046 platform_set_drvdata(pdev, mmc);
949 1047
950 mmc_add_host(mmc); 1048 printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X"
951 au1xmmc_hosts[i] = host; 1049 " (mode=%s)\n", pdev->id, host->iobase,
1050 host->flags & HOST_F_DMA ? "dma" : "pio");
952 1051
953 add_timer(&host->timer); 1052 return 0; /* all ok */
954 1053
955 printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X (mode=%s)\n", 1054out6:
956 host->id, host->iobase, dma ? "dma" : "pio"); 1055#ifdef CONFIG_LEDS_CLASS
957 } 1056 if (host->platdata && host->platdata->led)
1057 led_classdev_unregister(host->platdata->led);
1058out5:
1059#endif
1060 au_writel(0, HOST_ENABLE(host));
1061 au_writel(0, HOST_CONFIG(host));
1062 au_writel(0, HOST_CONFIG2(host));
1063 au_sync();
958 1064
959 enable_irq(AU1100_SD_IRQ); 1065#ifdef CONFIG_SOC_AU1200
1066 au1xmmc_dbdma_shutdown(host);
1067#endif
960 1068
961 return 0; 1069 tasklet_kill(&host->data_task);
1070 tasklet_kill(&host->finish_task);
1071
1072 if (host->platdata && host->platdata->cd_setup &&
1073 !(mmc->caps & MMC_CAP_NEEDS_POLL))
1074 host->platdata->cd_setup(mmc, 0);
1075
1076 free_irq(host->irq, host);
1077out3:
1078 iounmap((void *)host->iobase);
1079out2:
1080 release_resource(host->ioarea);
1081 kfree(host->ioarea);
1082out1:
1083 mmc_free_host(mmc);
1084out0:
1085 return ret;
962} 1086}
963 1087
964static int __devexit au1xmmc_remove(struct platform_device *pdev) 1088static int __devexit au1xmmc_remove(struct platform_device *pdev)
965{ 1089{
1090 struct mmc_host *mmc = platform_get_drvdata(pdev);
1091 struct au1xmmc_host *host;
1092
1093 if (mmc) {
1094 host = mmc_priv(mmc);
966 1095
967 int i; 1096 mmc_remove_host(mmc);
968 1097
969 disable_irq(AU1100_SD_IRQ); 1098#ifdef CONFIG_LEDS_CLASS
1099 if (host->platdata && host->platdata->led)
1100 led_classdev_unregister(host->platdata->led);
1101#endif
970 1102
971 for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { 1103 if (host->platdata && host->platdata->cd_setup &&
972 struct au1xmmc_host *host = au1xmmc_hosts[i]; 1104 !(mmc->caps & MMC_CAP_NEEDS_POLL))
973 if (!host) continue; 1105 host->platdata->cd_setup(mmc, 0);
1106
1107 au_writel(0, HOST_ENABLE(host));
1108 au_writel(0, HOST_CONFIG(host));
1109 au_writel(0, HOST_CONFIG2(host));
1110 au_sync();
974 1111
975 tasklet_kill(&host->data_task); 1112 tasklet_kill(&host->data_task);
976 tasklet_kill(&host->finish_task); 1113 tasklet_kill(&host->finish_task);
977 1114
978 del_timer_sync(&host->timer); 1115#ifdef CONFIG_SOC_AU1200
1116 au1xmmc_dbdma_shutdown(host);
1117#endif
979 au1xmmc_set_power(host, 0); 1118 au1xmmc_set_power(host, 0);
980 1119
981 mmc_remove_host(host->mmc); 1120 free_irq(host->irq, host);
982 1121 iounmap((void *)host->iobase);
983 au1xxx_dbdma_chan_free(host->tx_chan); 1122 release_resource(host->ioarea);
984 au1xxx_dbdma_chan_free(host->rx_chan); 1123 kfree(host->ioarea);
985 1124
986 au_writel(0x0, HOST_ENABLE(host)); 1125 mmc_free_host(mmc);
987 au_sync();
988 } 1126 }
989
990 free_irq(AU1100_SD_IRQ, 0);
991 return 0; 1127 return 0;
992} 1128}
993 1129
@@ -1004,21 +1140,31 @@ static struct platform_driver au1xmmc_driver = {
1004 1140
1005static int __init au1xmmc_init(void) 1141static int __init au1xmmc_init(void)
1006{ 1142{
1143#ifdef CONFIG_SOC_AU1200
1144 /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride
1145 * of 8 bits. And since devices are shared, we need to create
1146 * our own to avoid freaking out other devices.
1147 */
1148 memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);
1149 if (!memid)
1150 printk(KERN_ERR "au1xmmc: cannot add memory dbdma dev\n");
1151#endif
1007 return platform_driver_register(&au1xmmc_driver); 1152 return platform_driver_register(&au1xmmc_driver);
1008} 1153}
1009 1154
1010static void __exit au1xmmc_exit(void) 1155static void __exit au1xmmc_exit(void)
1011{ 1156{
1157#ifdef CONFIG_SOC_AU1200
1158 if (memid)
1159 au1xxx_ddma_del_device(memid);
1160#endif
1012 platform_driver_unregister(&au1xmmc_driver); 1161 platform_driver_unregister(&au1xmmc_driver);
1013} 1162}
1014 1163
1015module_init(au1xmmc_init); 1164module_init(au1xmmc_init);
1016module_exit(au1xmmc_exit); 1165module_exit(au1xmmc_exit);
1017 1166
1018#ifdef MODULE
1019MODULE_AUTHOR("Advanced Micro Devices, Inc"); 1167MODULE_AUTHOR("Advanced Micro Devices, Inc");
1020MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX"); 1168MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
1021MODULE_LICENSE("GPL"); 1169MODULE_LICENSE("GPL");
1022MODULE_ALIAS("platform:au1xxx-mmc"); 1170MODULE_ALIAS("platform:au1xxx-mmc");
1023#endif
1024
diff --git a/drivers/mmc/host/au1xmmc.h b/drivers/mmc/host/au1xmmc.h
deleted file mode 100644
index 341cbdf0baca..000000000000
--- a/drivers/mmc/host/au1xmmc.h
+++ /dev/null
@@ -1,96 +0,0 @@
1#ifndef _AU1XMMC_H_
2#define _AU1XMMC_H_
3
4/* Hardware definitions */
5
6#define AU1XMMC_DESCRIPTOR_COUNT 1
7#define AU1XMMC_DESCRIPTOR_SIZE 2048
8
9#define AU1XMMC_OCR ( MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
10 MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
11 MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)
12
13/* Easy access macros */
14
15#define HOST_STATUS(h) ((h)->iobase + SD_STATUS)
16#define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG)
17#define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE)
18#define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT)
19#define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT)
20#define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG)
21#define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE)
22#define HOST_CMD(h) ((h)->iobase + SD_CMD)
23#define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2)
24#define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT)
25#define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG)
26
27#define DMA_CHANNEL(h) \
28 ( ((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan)
29
30/* This gives us a hard value for the stop command that we can write directly
31 * to the command register
32 */
33
34#define STOP_CMD (SD_CMD_RT_1B|SD_CMD_CT_7|(0xC << SD_CMD_CI_SHIFT)|SD_CMD_GO)
35
36/* This is the set of interrupts that we configure by default */
37
38#if 0
39#define AU1XMMC_INTERRUPTS (SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_DD | \
40 SD_CONFIG_RAT | SD_CONFIG_CR | SD_CONFIG_I)
41#endif
42
43#define AU1XMMC_INTERRUPTS (SD_CONFIG_SC | SD_CONFIG_DT | \
44 SD_CONFIG_RAT | SD_CONFIG_CR | SD_CONFIG_I)
45/* The poll event (looking for insert/remove events runs twice a second */
46#define AU1XMMC_DETECT_TIMEOUT (HZ/2)
47
48struct au1xmmc_host {
49 struct mmc_host *mmc;
50 struct mmc_request *mrq;
51
52 u32 id;
53
54 u32 flags;
55 u32 iobase;
56 u32 clock;
57 u32 bus_width;
58 u32 power_mode;
59
60 int status;
61
62 struct {
63 int len;
64 int dir;
65 } dma;
66
67 struct {
68 int index;
69 int offset;
70 int len;
71 } pio;
72
73 u32 tx_chan;
74 u32 rx_chan;
75
76 struct timer_list timer;
77 struct tasklet_struct finish_task;
78 struct tasklet_struct data_task;
79
80 spinlock_t lock;
81};
82
83/* Status flags used by the host structure */
84
85#define HOST_F_XMIT 0x0001
86#define HOST_F_RECV 0x0002
87#define HOST_F_DMA 0x0010
88#define HOST_F_ACTIVE 0x0100
89#define HOST_F_STOP 0x1000
90
91#define HOST_S_IDLE 0x0001
92#define HOST_S_CMD 0x0002
93#define HOST_S_DATA 0x0003
94#define HOST_S_STOP 0x0004
95
96#endif
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index eed211b2ac70..5e880c0f1349 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -892,9 +892,12 @@ static int imxmci_get_ro(struct mmc_host *mmc)
892 struct imxmci_host *host = mmc_priv(mmc); 892 struct imxmci_host *host = mmc_priv(mmc);
893 893
894 if (host->pdata && host->pdata->get_ro) 894 if (host->pdata && host->pdata->get_ro)
895 return host->pdata->get_ro(mmc_dev(mmc)); 895 return !!host->pdata->get_ro(mmc_dev(mmc));
896 /* Host doesn't support read only detection so assume writeable */ 896 /*
897 return 0; 897 * Board doesn't support read only detection; let the mmc core
898 * decide what to do.
899 */
900 return -ENOSYS;
898} 901}
899 902
900 903
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 35508584ac2a..41cc63360e43 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1126,16 +1126,28 @@ static int mmc_spi_get_ro(struct mmc_host *mmc)
1126 struct mmc_spi_host *host = mmc_priv(mmc); 1126 struct mmc_spi_host *host = mmc_priv(mmc);
1127 1127
1128 if (host->pdata && host->pdata->get_ro) 1128 if (host->pdata && host->pdata->get_ro)
1129 return host->pdata->get_ro(mmc->parent); 1129 return !!host->pdata->get_ro(mmc->parent);
1130 /* board doesn't support read only detection; assume writeable */ 1130 /*
1131 return 0; 1131 * Board doesn't support read only detection; let the mmc core
1132 * decide what to do.
1133 */
1134 return -ENOSYS;
1132} 1135}
1133 1136
1137static int mmc_spi_get_cd(struct mmc_host *mmc)
1138{
1139 struct mmc_spi_host *host = mmc_priv(mmc);
1140
1141 if (host->pdata && host->pdata->get_cd)
1142 return !!host->pdata->get_cd(mmc->parent);
1143 return -ENOSYS;
1144}
1134 1145
1135static const struct mmc_host_ops mmc_spi_ops = { 1146static const struct mmc_host_ops mmc_spi_ops = {
1136 .request = mmc_spi_request, 1147 .request = mmc_spi_request,
1137 .set_ios = mmc_spi_set_ios, 1148 .set_ios = mmc_spi_set_ios,
1138 .get_ro = mmc_spi_get_ro, 1149 .get_ro = mmc_spi_get_ro,
1150 .get_cd = mmc_spi_get_cd,
1139}; 1151};
1140 1152
1141 1153
@@ -1240,10 +1252,7 @@ static int mmc_spi_probe(struct spi_device *spi)
1240 mmc->ops = &mmc_spi_ops; 1252 mmc->ops = &mmc_spi_ops;
1241 mmc->max_blk_size = MMC_SPI_BLOCKSIZE; 1253 mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
1242 1254
1243 /* As long as we keep track of the number of successfully 1255 mmc->caps = MMC_CAP_SPI;
1244 * transmitted blocks, we're good for multiwrite.
1245 */
1246 mmc->caps = MMC_CAP_SPI | MMC_CAP_MULTIWRITE;
1247 1256
1248 /* SPI doesn't need the lowspeed device identification thing for 1257 /* SPI doesn't need the lowspeed device identification thing for
1249 * MMC or SD cards, since it never comes up in open drain mode. 1258 * MMC or SD cards, since it never comes up in open drain mode.
@@ -1319,17 +1328,23 @@ static int mmc_spi_probe(struct spi_device *spi)
1319 goto fail_glue_init; 1328 goto fail_glue_init;
1320 } 1329 }
1321 1330
1331 /* pass platform capabilities, if any */
1332 if (host->pdata)
1333 mmc->caps |= host->pdata->caps;
1334
1322 status = mmc_add_host(mmc); 1335 status = mmc_add_host(mmc);
1323 if (status != 0) 1336 if (status != 0)
1324 goto fail_add_host; 1337 goto fail_add_host;
1325 1338
1326 dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n", 1339 dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
1327 mmc->class_dev.bus_id, 1340 mmc->class_dev.bus_id,
1328 host->dma_dev ? "" : ", no DMA", 1341 host->dma_dev ? "" : ", no DMA",
1329 (host->pdata && host->pdata->get_ro) 1342 (host->pdata && host->pdata->get_ro)
1330 ? "" : ", no WP", 1343 ? "" : ", no WP",
1331 (host->pdata && host->pdata->setpower) 1344 (host->pdata && host->pdata->setpower)
1332 ? "" : ", no poweroff"); 1345 ? "" : ", no poweroff",
1346 (mmc->caps & MMC_CAP_NEEDS_POLL)
1347 ? ", cd polling" : "");
1333 return 0; 1348 return 0;
1334 1349
1335fail_add_host: 1350fail_add_host:
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index da5fecad74d9..696cf3647ceb 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -535,7 +535,6 @@ static int mmci_probe(struct amba_device *dev, void *id)
535 mmc->f_min = (host->mclk + 511) / 512; 535 mmc->f_min = (host->mclk + 511) / 512;
536 mmc->f_max = min(host->mclk, fmax); 536 mmc->f_max = min(host->mclk, fmax);
537 mmc->ocr_avail = plat->ocr_mask; 537 mmc->ocr_avail = plat->ocr_mask;
538 mmc->caps = MMC_CAP_MULTIWRITE;
539 538
540 /* 539 /*
541 * We can do SGIO 540 * We can do SGIO
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 549517c35675..dbc26eb6a89e 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1317,7 +1317,7 @@ static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
1317 1317
1318 host->slots[id] = slot; 1318 host->slots[id] = slot;
1319 1319
1320 mmc->caps = MMC_CAP_MULTIWRITE; 1320 mmc->caps = 0;
1321 if (host->pdata->conf.wire4) 1321 if (host->pdata->conf.wire4)
1322 mmc->caps |= MMC_CAP_4_BIT_DATA; 1322 mmc->caps |= MMC_CAP_4_BIT_DATA;
1323 1323
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index d89475d36988..d39f59738866 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -374,9 +374,12 @@ static int pxamci_get_ro(struct mmc_host *mmc)
374 struct pxamci_host *host = mmc_priv(mmc); 374 struct pxamci_host *host = mmc_priv(mmc);
375 375
376 if (host->pdata && host->pdata->get_ro) 376 if (host->pdata && host->pdata->get_ro)
377 return host->pdata->get_ro(mmc_dev(mmc)); 377 return !!host->pdata->get_ro(mmc_dev(mmc));
378 /* Host doesn't support read only detection so assume writeable */ 378 /*
379 return 0; 379 * Board doesn't support read only detection; let the mmc core
380 * decide what to do.
381 */
382 return -ENOSYS;
380} 383}
381 384
382static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 385static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
new file mode 100644
index 000000000000..6a1e4994b724
--- /dev/null
+++ b/drivers/mmc/host/s3cmci.c
@@ -0,0 +1,1446 @@
1/*
2 * linux/drivers/mmc/host/s3cmci.c - Samsung S3C MCI driver
3 *
4 * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/dma-mapping.h>
13#include <linux/clk.h>
14#include <linux/mmc/host.h>
15#include <linux/platform_device.h>
16#include <linux/irq.h>
17#include <linux/io.h>
18
19#include <asm/dma.h>
20
21#include <asm/arch/regs-sdi.h>
22#include <asm/arch/regs-gpio.h>
23
24#include <asm/plat-s3c24xx/mci.h>
25
26#include "s3cmci.h"
27
28#define DRIVER_NAME "s3c-mci"
29
30enum dbg_channels {
31 dbg_err = (1 << 0),
32 dbg_debug = (1 << 1),
33 dbg_info = (1 << 2),
34 dbg_irq = (1 << 3),
35 dbg_sg = (1 << 4),
36 dbg_dma = (1 << 5),
37 dbg_pio = (1 << 6),
38 dbg_fail = (1 << 7),
39 dbg_conf = (1 << 8),
40};
41
42static const int dbgmap_err = dbg_err | dbg_fail;
43static const int dbgmap_info = dbg_info | dbg_conf;
44static const int dbgmap_debug = dbg_debug;
45
46#define dbg(host, channels, args...) \
47 do { \
48 if (dbgmap_err & channels) \
49 dev_err(&host->pdev->dev, args); \
50 else if (dbgmap_info & channels) \
51 dev_info(&host->pdev->dev, args); \
52 else if (dbgmap_debug & channels) \
53 dev_dbg(&host->pdev->dev, args); \
54 } while (0)
55
56#define RESSIZE(ressource) (((ressource)->end - (ressource)->start)+1)
57
58static struct s3c2410_dma_client s3cmci_dma_client = {
59 .name = "s3c-mci",
60};
61
62static void finalize_request(struct s3cmci_host *host);
63static void s3cmci_send_request(struct mmc_host *mmc);
64static void s3cmci_reset(struct s3cmci_host *host);
65
66#ifdef CONFIG_MMC_DEBUG
67
68static void dbg_dumpregs(struct s3cmci_host *host, char *prefix)
69{
70 u32 con, pre, cmdarg, cmdcon, cmdsta, r0, r1, r2, r3, timer, bsize;
71 u32 datcon, datcnt, datsta, fsta, imask;
72
73 con = readl(host->base + S3C2410_SDICON);
74 pre = readl(host->base + S3C2410_SDIPRE);
75 cmdarg = readl(host->base + S3C2410_SDICMDARG);
76 cmdcon = readl(host->base + S3C2410_SDICMDCON);
77 cmdsta = readl(host->base + S3C2410_SDICMDSTAT);
78 r0 = readl(host->base + S3C2410_SDIRSP0);
79 r1 = readl(host->base + S3C2410_SDIRSP1);
80 r2 = readl(host->base + S3C2410_SDIRSP2);
81 r3 = readl(host->base + S3C2410_SDIRSP3);
82 timer = readl(host->base + S3C2410_SDITIMER);
83 bsize = readl(host->base + S3C2410_SDIBSIZE);
84 datcon = readl(host->base + S3C2410_SDIDCON);
85 datcnt = readl(host->base + S3C2410_SDIDCNT);
86 datsta = readl(host->base + S3C2410_SDIDSTA);
87 fsta = readl(host->base + S3C2410_SDIFSTA);
88 imask = readl(host->base + host->sdiimsk);
89
90 dbg(host, dbg_debug, "%s CON:[%08x] PRE:[%08x] TMR:[%08x]\n",
91 prefix, con, pre, timer);
92
93 dbg(host, dbg_debug, "%s CCON:[%08x] CARG:[%08x] CSTA:[%08x]\n",
94 prefix, cmdcon, cmdarg, cmdsta);
95
96 dbg(host, dbg_debug, "%s DCON:[%08x] FSTA:[%08x]"
97 " DSTA:[%08x] DCNT:[%08x]\n",
98 prefix, datcon, fsta, datsta, datcnt);
99
100 dbg(host, dbg_debug, "%s R0:[%08x] R1:[%08x]"
101 " R2:[%08x] R3:[%08x]\n",
102 prefix, r0, r1, r2, r3);
103}
104
105static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd,
106 int stop)
107{
108 snprintf(host->dbgmsg_cmd, 300,
109 "#%u%s op:%i arg:0x%08x flags:0x08%x retries:%u",
110 host->ccnt, (stop ? " (STOP)" : ""),
111 cmd->opcode, cmd->arg, cmd->flags, cmd->retries);
112
113 if (cmd->data) {
114 snprintf(host->dbgmsg_dat, 300,
115 "#%u bsize:%u blocks:%u bytes:%u",
116 host->dcnt, cmd->data->blksz,
117 cmd->data->blocks,
118 cmd->data->blocks * cmd->data->blksz);
119 } else {
120 host->dbgmsg_dat[0] = '\0';
121 }
122}
123
124static void dbg_dumpcmd(struct s3cmci_host *host, struct mmc_command *cmd,
125 int fail)
126{
127 unsigned int dbglvl = fail ? dbg_fail : dbg_debug;
128
129 if (!cmd)
130 return;
131
132 if (cmd->error == 0) {
133 dbg(host, dbglvl, "CMD[OK] %s R0:0x%08x\n",
134 host->dbgmsg_cmd, cmd->resp[0]);
135 } else {
136 dbg(host, dbglvl, "CMD[ERR %i] %s Status:%s\n",
137 cmd->error, host->dbgmsg_cmd, host->status);
138 }
139
140 if (!cmd->data)
141 return;
142
143 if (cmd->data->error == 0) {
144 dbg(host, dbglvl, "DAT[OK] %s\n", host->dbgmsg_dat);
145 } else {
146 dbg(host, dbglvl, "DAT[ERR %i] %s DCNT:0x%08x\n",
147 cmd->data->error, host->dbgmsg_dat,
148 readl(host->base + S3C2410_SDIDCNT));
149 }
150}
151#else
152static void dbg_dumpcmd(struct s3cmci_host *host,
153 struct mmc_command *cmd, int fail) { }
154
155static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd,
156 int stop) { }
157
158static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { }
159
160#endif /* CONFIG_MMC_DEBUG */
161
162static inline u32 enable_imask(struct s3cmci_host *host, u32 imask)
163{
164 u32 newmask;
165
166 newmask = readl(host->base + host->sdiimsk);
167 newmask |= imask;
168
169 writel(newmask, host->base + host->sdiimsk);
170
171 return newmask;
172}
173
174static inline u32 disable_imask(struct s3cmci_host *host, u32 imask)
175{
176 u32 newmask;
177
178 newmask = readl(host->base + host->sdiimsk);
179 newmask &= ~imask;
180
181 writel(newmask, host->base + host->sdiimsk);
182
183 return newmask;
184}
185
186static inline void clear_imask(struct s3cmci_host *host)
187{
188 writel(0, host->base + host->sdiimsk);
189}
190
191static inline int get_data_buffer(struct s3cmci_host *host,
192 u32 *words, u32 **pointer)
193{
194 struct scatterlist *sg;
195
196 if (host->pio_active == XFER_NONE)
197 return -EINVAL;
198
199 if ((!host->mrq) || (!host->mrq->data))
200 return -EINVAL;
201
202 if (host->pio_sgptr >= host->mrq->data->sg_len) {
203 dbg(host, dbg_debug, "no more buffers (%i/%i)\n",
204 host->pio_sgptr, host->mrq->data->sg_len);
205 return -EBUSY;
206 }
207 sg = &host->mrq->data->sg[host->pio_sgptr];
208
209 *words = sg->length >> 2;
210 *pointer = sg_virt(sg);
211
212 host->pio_sgptr++;
213
214 dbg(host, dbg_sg, "new buffer (%i/%i)\n",
215 host->pio_sgptr, host->mrq->data->sg_len);
216
217 return 0;
218}
219
220static inline u32 fifo_count(struct s3cmci_host *host)
221{
222 u32 fifostat = readl(host->base + S3C2410_SDIFSTA);
223
224 fifostat &= S3C2410_SDIFSTA_COUNTMASK;
225 return fifostat >> 2;
226}
227
228static inline u32 fifo_free(struct s3cmci_host *host)
229{
230 u32 fifostat = readl(host->base + S3C2410_SDIFSTA);
231
232 fifostat &= S3C2410_SDIFSTA_COUNTMASK;
233 return (63 - fifostat) >> 2;
234}
235
236static void do_pio_read(struct s3cmci_host *host)
237{
238 int res;
239 u32 fifo;
240 void __iomem *from_ptr;
241
242 /* write real prescaler to host, it might be set slow to fix */
243 writel(host->prescaler, host->base + S3C2410_SDIPRE);
244
245 from_ptr = host->base + host->sdidata;
246
247 while ((fifo = fifo_count(host))) {
248 if (!host->pio_words) {
249 res = get_data_buffer(host, &host->pio_words,
250 &host->pio_ptr);
251 if (res) {
252 host->pio_active = XFER_NONE;
253 host->complete_what = COMPLETION_FINALIZE;
254
255 dbg(host, dbg_pio, "pio_read(): "
256 "complete (no more data).\n");
257 return;
258 }
259
260 dbg(host, dbg_pio,
261 "pio_read(): new target: [%i]@[%p]\n",
262 host->pio_words, host->pio_ptr);
263 }
264
265 dbg(host, dbg_pio,
266 "pio_read(): fifo:[%02i] buffer:[%03i] dcnt:[%08X]\n",
267 fifo, host->pio_words,
268 readl(host->base + S3C2410_SDIDCNT));
269
270 if (fifo > host->pio_words)
271 fifo = host->pio_words;
272
273 host->pio_words -= fifo;
274 host->pio_count += fifo;
275
276 while (fifo--)
277 *(host->pio_ptr++) = readl(from_ptr);
278 }
279
280 if (!host->pio_words) {
281 res = get_data_buffer(host, &host->pio_words, &host->pio_ptr);
282 if (res) {
283 dbg(host, dbg_pio,
284 "pio_read(): complete (no more buffers).\n");
285 host->pio_active = XFER_NONE;
286 host->complete_what = COMPLETION_FINALIZE;
287
288 return;
289 }
290 }
291
292 enable_imask(host,
293 S3C2410_SDIIMSK_RXFIFOHALF | S3C2410_SDIIMSK_RXFIFOLAST);
294}
295
296static void do_pio_write(struct s3cmci_host *host)
297{
298 void __iomem *to_ptr;
299 int res;
300 u32 fifo;
301
302 to_ptr = host->base + host->sdidata;
303
304 while ((fifo = fifo_free(host))) {
305 if (!host->pio_words) {
306 res = get_data_buffer(host, &host->pio_words,
307 &host->pio_ptr);
308 if (res) {
309 dbg(host, dbg_pio,
310 "pio_write(): complete (no more data).\n");
311 host->pio_active = XFER_NONE;
312
313 return;
314 }
315
316 dbg(host, dbg_pio,
317 "pio_write(): new source: [%i]@[%p]\n",
318 host->pio_words, host->pio_ptr);
319
320 }
321
322 if (fifo > host->pio_words)
323 fifo = host->pio_words;
324
325 host->pio_words -= fifo;
326 host->pio_count += fifo;
327
328 while (fifo--)
329 writel(*(host->pio_ptr++), to_ptr);
330 }
331
332 enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
333}
334
335static void pio_tasklet(unsigned long data)
336{
337 struct s3cmci_host *host = (struct s3cmci_host *) data;
338
339
340 disable_irq(host->irq);
341
342 if (host->pio_active == XFER_WRITE)
343 do_pio_write(host);
344
345 if (host->pio_active == XFER_READ)
346 do_pio_read(host);
347
348 if (host->complete_what == COMPLETION_FINALIZE) {
349 clear_imask(host);
350 if (host->pio_active != XFER_NONE) {
351 dbg(host, dbg_err, "unfinished %s "
352 "- pio_count:[%u] pio_words:[%u]\n",
353 (host->pio_active == XFER_READ) ? "read" : "write",
354 host->pio_count, host->pio_words);
355
356 if (host->mrq->data)
357 host->mrq->data->error = -EINVAL;
358 }
359
360 finalize_request(host);
361 } else
362 enable_irq(host->irq);
363}
364
365/*
366 * ISR for SDI Interface IRQ
367 * Communication between driver and ISR works as follows:
368 * host->mrq points to current request
369 * host->complete_what Indicates when the request is considered done
370 * COMPLETION_CMDSENT when the command was sent
371 * COMPLETION_RSPFIN when a response was received
372 * COMPLETION_XFERFINISH when the data transfer is finished
373 * COMPLETION_XFERFINISH_RSPFIN both of the above.
374 * host->complete_request is the completion-object the driver waits for
375 *
376 * 1) Driver sets up host->mrq and host->complete_what
377 * 2) Driver prepares the transfer
378 * 3) Driver enables interrupts
379 * 4) Driver starts transfer
380 * 5) Driver waits for host->complete_request
381 * 6) ISR checks for request status (errors and success)
382 * 7) ISR sets host->mrq->cmd->error and host->mrq->data->error
383 * 8) ISR completes host->complete_request
384 * 9) ISR disables interrupts
385 * 10) Driver wakes up and takes care of the request
386 *
387 * Note: "->error"-fields are expected to be set to 0 before the request
388 * was issued by mmc.c - therefore they are only set, when an error
389 * condition comes up
390 */
391
392static irqreturn_t s3cmci_irq(int irq, void *dev_id)
393{
394 struct s3cmci_host *host = dev_id;
395 struct mmc_command *cmd;
396 u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk;
397 u32 mci_cclear, mci_dclear;
398 unsigned long iflags;
399
400 spin_lock_irqsave(&host->complete_lock, iflags);
401
402 mci_csta = readl(host->base + S3C2410_SDICMDSTAT);
403 mci_dsta = readl(host->base + S3C2410_SDIDSTA);
404 mci_dcnt = readl(host->base + S3C2410_SDIDCNT);
405 mci_fsta = readl(host->base + S3C2410_SDIFSTA);
406 mci_imsk = readl(host->base + host->sdiimsk);
407 mci_cclear = 0;
408 mci_dclear = 0;
409
410 if ((host->complete_what == COMPLETION_NONE) ||
411 (host->complete_what == COMPLETION_FINALIZE)) {
412 host->status = "nothing to complete";
413 clear_imask(host);
414 goto irq_out;
415 }
416
417 if (!host->mrq) {
418 host->status = "no active mrq";
419 clear_imask(host);
420 goto irq_out;
421 }
422
423 cmd = host->cmd_is_stop ? host->mrq->stop : host->mrq->cmd;
424
425 if (!cmd) {
426 host->status = "no active cmd";
427 clear_imask(host);
428 goto irq_out;
429 }
430
431 if (!host->dodma) {
432 if ((host->pio_active == XFER_WRITE) &&
433 (mci_fsta & S3C2410_SDIFSTA_TFDET)) {
434
435 disable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
436 tasklet_schedule(&host->pio_tasklet);
437 host->status = "pio tx";
438 }
439
440 if ((host->pio_active == XFER_READ) &&
441 (mci_fsta & S3C2410_SDIFSTA_RFDET)) {
442
443 disable_imask(host,
444 S3C2410_SDIIMSK_RXFIFOHALF |
445 S3C2410_SDIIMSK_RXFIFOLAST);
446
447 tasklet_schedule(&host->pio_tasklet);
448 host->status = "pio rx";
449 }
450 }
451
452 if (mci_csta & S3C2410_SDICMDSTAT_CMDTIMEOUT) {
453 dbg(host, dbg_err, "CMDSTAT: error CMDTIMEOUT\n");
454 cmd->error = -ETIMEDOUT;
455 host->status = "error: command timeout";
456 goto fail_transfer;
457 }
458
459 if (mci_csta & S3C2410_SDICMDSTAT_CMDSENT) {
460 if (host->complete_what == COMPLETION_CMDSENT) {
461 host->status = "ok: command sent";
462 goto close_transfer;
463 }
464
465 mci_cclear |= S3C2410_SDICMDSTAT_CMDSENT;
466 }
467
468 if (mci_csta & S3C2410_SDICMDSTAT_CRCFAIL) {
469 if (cmd->flags & MMC_RSP_CRC) {
470 if (host->mrq->cmd->flags & MMC_RSP_136) {
471 dbg(host, dbg_irq,
472 "fixup: ignore CRC fail with long rsp\n");
473 } else {
474 /* note, we used to fail the transfer
475 * here, but it seems that this is just
476 * the hardware getting it wrong.
477 *
478 * cmd->error = -EILSEQ;
479 * host->status = "error: bad command crc";
480 * goto fail_transfer;
481 */
482 }
483 }
484
485 mci_cclear |= S3C2410_SDICMDSTAT_CRCFAIL;
486 }
487
488 if (mci_csta & S3C2410_SDICMDSTAT_RSPFIN) {
489 if (host->complete_what == COMPLETION_RSPFIN) {
490 host->status = "ok: command response received";
491 goto close_transfer;
492 }
493
494 if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN)
495 host->complete_what = COMPLETION_XFERFINISH;
496
497 mci_cclear |= S3C2410_SDICMDSTAT_RSPFIN;
498 }
499
500 /* errors handled after this point are only relevant
501 when a data transfer is in progress */
502
503 if (!cmd->data)
504 goto clear_status_bits;
505
506 /* Check for FIFO failure */
507 if (host->is2440) {
508 if (mci_fsta & S3C2440_SDIFSTA_FIFOFAIL) {
509 dbg(host, dbg_err, "FIFO failure\n");
510 host->mrq->data->error = -EILSEQ;
511 host->status = "error: 2440 fifo failure";
512 goto fail_transfer;
513 }
514 } else {
515 if (mci_dsta & S3C2410_SDIDSTA_FIFOFAIL) {
516 dbg(host, dbg_err, "FIFO failure\n");
517 cmd->data->error = -EILSEQ;
518 host->status = "error: fifo failure";
519 goto fail_transfer;
520 }
521 }
522
523 if (mci_dsta & S3C2410_SDIDSTA_RXCRCFAIL) {
524 dbg(host, dbg_err, "bad data crc (outgoing)\n");
525 cmd->data->error = -EILSEQ;
526 host->status = "error: bad data crc (outgoing)";
527 goto fail_transfer;
528 }
529
530 if (mci_dsta & S3C2410_SDIDSTA_CRCFAIL) {
531 dbg(host, dbg_err, "bad data crc (incoming)\n");
532 cmd->data->error = -EILSEQ;
533 host->status = "error: bad data crc (incoming)";
534 goto fail_transfer;
535 }
536
537 if (mci_dsta & S3C2410_SDIDSTA_DATATIMEOUT) {
538 dbg(host, dbg_err, "data timeout\n");
539 cmd->data->error = -ETIMEDOUT;
540 host->status = "error: data timeout";
541 goto fail_transfer;
542 }
543
544 if (mci_dsta & S3C2410_SDIDSTA_XFERFINISH) {
545 if (host->complete_what == COMPLETION_XFERFINISH) {
546 host->status = "ok: data transfer completed";
547 goto close_transfer;
548 }
549
550 if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN)
551 host->complete_what = COMPLETION_RSPFIN;
552
553 mci_dclear |= S3C2410_SDIDSTA_XFERFINISH;
554 }
555
556clear_status_bits:
557 writel(mci_cclear, host->base + S3C2410_SDICMDSTAT);
558 writel(mci_dclear, host->base + S3C2410_SDIDSTA);
559
560 goto irq_out;
561
562fail_transfer:
563 host->pio_active = XFER_NONE;
564
565close_transfer:
566 host->complete_what = COMPLETION_FINALIZE;
567
568 clear_imask(host);
569 tasklet_schedule(&host->pio_tasklet);
570
571 goto irq_out;
572
573irq_out:
574 dbg(host, dbg_irq,
575 "csta:0x%08x dsta:0x%08x fsta:0x%08x dcnt:0x%08x status:%s.\n",
576 mci_csta, mci_dsta, mci_fsta, mci_dcnt, host->status);
577
578 spin_unlock_irqrestore(&host->complete_lock, iflags);
579 return IRQ_HANDLED;
580
581}
582
583/*
584 * ISR for the CardDetect Pin
585*/
586
587static irqreturn_t s3cmci_irq_cd(int irq, void *dev_id)
588{
589 struct s3cmci_host *host = (struct s3cmci_host *)dev_id;
590
591 dbg(host, dbg_irq, "card detect\n");
592
593 mmc_detect_change(host->mmc, msecs_to_jiffies(500));
594
595 return IRQ_HANDLED;
596}
597
598void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch, void *buf_id,
599 int size, enum s3c2410_dma_buffresult result)
600{
601 struct s3cmci_host *host = buf_id;
602 unsigned long iflags;
603 u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt;
604
605 mci_csta = readl(host->base + S3C2410_SDICMDSTAT);
606 mci_dsta = readl(host->base + S3C2410_SDIDSTA);
607 mci_fsta = readl(host->base + S3C2410_SDIFSTA);
608 mci_dcnt = readl(host->base + S3C2410_SDIDCNT);
609
610 BUG_ON(!host->mrq);
611 BUG_ON(!host->mrq->data);
612 BUG_ON(!host->dmatogo);
613
614 spin_lock_irqsave(&host->complete_lock, iflags);
615
616 if (result != S3C2410_RES_OK) {
617 dbg(host, dbg_fail, "DMA FAILED: csta=0x%08x dsta=0x%08x "
618 "fsta=0x%08x dcnt:0x%08x result:0x%08x toGo:%u\n",
619 mci_csta, mci_dsta, mci_fsta,
620 mci_dcnt, result, host->dmatogo);
621
622 goto fail_request;
623 }
624
625 host->dmatogo--;
626 if (host->dmatogo) {
627 dbg(host, dbg_dma, "DMA DONE Size:%i DSTA:[%08x] "
628 "DCNT:[%08x] toGo:%u\n",
629 size, mci_dsta, mci_dcnt, host->dmatogo);
630
631 goto out;
632 }
633
634 dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n",
635 size, mci_dsta, mci_dcnt);
636
637 host->complete_what = COMPLETION_FINALIZE;
638
639out:
640 tasklet_schedule(&host->pio_tasklet);
641 spin_unlock_irqrestore(&host->complete_lock, iflags);
642 return;
643
644fail_request:
645 host->mrq->data->error = -EINVAL;
646 host->complete_what = COMPLETION_FINALIZE;
647 writel(0, host->base + host->sdiimsk);
648 goto out;
649
650}
651
652static void finalize_request(struct s3cmci_host *host)
653{
654 struct mmc_request *mrq = host->mrq;
655 struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
656 int debug_as_failure = 0;
657
658 if (host->complete_what != COMPLETION_FINALIZE)
659 return;
660
661 if (!mrq)
662 return;
663
664 if (cmd->data && (cmd->error == 0) &&
665 (cmd->data->error == 0)) {
666 if (host->dodma && (!host->dma_complete)) {
667 dbg(host, dbg_dma, "DMA Missing!\n");
668 return;
669 }
670 }
671
672 /* Read response from controller. */
673 cmd->resp[0] = readl(host->base + S3C2410_SDIRSP0);
674 cmd->resp[1] = readl(host->base + S3C2410_SDIRSP1);
675 cmd->resp[2] = readl(host->base + S3C2410_SDIRSP2);
676 cmd->resp[3] = readl(host->base + S3C2410_SDIRSP3);
677
678 writel(host->prescaler, host->base + S3C2410_SDIPRE);
679
680 if (cmd->error)
681 debug_as_failure = 1;
682
683 if (cmd->data && cmd->data->error)
684 debug_as_failure = 1;
685
686 dbg_dumpcmd(host, cmd, debug_as_failure);
687
688 /* Cleanup controller */
689 writel(0, host->base + S3C2410_SDICMDARG);
690 writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
691 writel(0, host->base + S3C2410_SDICMDCON);
692 writel(0, host->base + host->sdiimsk);
693
694 if (cmd->data && cmd->error)
695 cmd->data->error = cmd->error;
696
697 if (cmd->data && cmd->data->stop && (!host->cmd_is_stop)) {
698 host->cmd_is_stop = 1;
699 s3cmci_send_request(host->mmc);
700 return;
701 }
702
703 /* If we have no data transfer we are finished here */
704 if (!mrq->data)
705 goto request_done;
706
707 /* Calculate the amount of bytes transferred if there was no error */
708 if (mrq->data->error == 0) {
709 mrq->data->bytes_xfered =
710 (mrq->data->blocks * mrq->data->blksz);
711 } else {
712 mrq->data->bytes_xfered = 0;
713 }
714
715 /* If we had an error while transferring data we flush the
716 * DMA channel and the fifo to clear out any garbage. */
717 if (mrq->data->error != 0) {
718 if (host->dodma)
719 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
720
721 if (host->is2440) {
722 /* Clear failure register and reset fifo. */
723 writel(S3C2440_SDIFSTA_FIFORESET |
724 S3C2440_SDIFSTA_FIFOFAIL,
725 host->base + S3C2410_SDIFSTA);
726 } else {
727 u32 mci_con;
728
729 /* reset fifo */
730 mci_con = readl(host->base + S3C2410_SDICON);
731 mci_con |= S3C2410_SDICON_FIFORESET;
732
733 writel(mci_con, host->base + S3C2410_SDICON);
734 }
735 }
736
737request_done:
738 host->complete_what = COMPLETION_NONE;
739 host->mrq = NULL;
740 mmc_request_done(host->mmc, mrq);
741}
742
743
744void s3cmci_dma_setup(struct s3cmci_host *host, enum s3c2410_dmasrc source)
745{
746 static enum s3c2410_dmasrc last_source = -1;
747 static int setup_ok;
748
749 if (last_source == source)
750 return;
751
752 last_source = source;
753
754 s3c2410_dma_devconfig(host->dma, source, 3,
755 host->mem->start + host->sdidata);
756
757 if (!setup_ok) {
758 s3c2410_dma_config(host->dma, 4,
759 (S3C2410_DCON_HWTRIG | S3C2410_DCON_CH0_SDI));
760 s3c2410_dma_set_buffdone_fn(host->dma,
761 s3cmci_dma_done_callback);
762 s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART);
763 setup_ok = 1;
764 }
765}
766
767static void s3cmci_send_command(struct s3cmci_host *host,
768 struct mmc_command *cmd)
769{
770 u32 ccon, imsk;
771
772 imsk = S3C2410_SDIIMSK_CRCSTATUS | S3C2410_SDIIMSK_CMDTIMEOUT |
773 S3C2410_SDIIMSK_RESPONSEND | S3C2410_SDIIMSK_CMDSENT |
774 S3C2410_SDIIMSK_RESPONSECRC;
775
776 enable_imask(host, imsk);
777
778 if (cmd->data)
779 host->complete_what = COMPLETION_XFERFINISH_RSPFIN;
780 else if (cmd->flags & MMC_RSP_PRESENT)
781 host->complete_what = COMPLETION_RSPFIN;
782 else
783 host->complete_what = COMPLETION_CMDSENT;
784
785 writel(cmd->arg, host->base + S3C2410_SDICMDARG);
786
787 ccon = cmd->opcode & S3C2410_SDICMDCON_INDEX;
788 ccon |= S3C2410_SDICMDCON_SENDERHOST | S3C2410_SDICMDCON_CMDSTART;
789
790 if (cmd->flags & MMC_RSP_PRESENT)
791 ccon |= S3C2410_SDICMDCON_WAITRSP;
792
793 if (cmd->flags & MMC_RSP_136)
794 ccon |= S3C2410_SDICMDCON_LONGRSP;
795
796 writel(ccon, host->base + S3C2410_SDICMDCON);
797}
798
799static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
800{
801 u32 dcon, imsk, stoptries = 3;
802
803 /* write DCON register */
804
805 if (!data) {
806 writel(0, host->base + S3C2410_SDIDCON);
807 return 0;
808 }
809
810 if ((data->blksz & 3) != 0) {
811 /* We cannot deal with unaligned blocks with more than
812 * one block being transferred. */
813
814 if (data->blocks > 1)
815 return -EINVAL;
816
817 /* No support yet for non-word block transfers. */
818 return -EINVAL;
819 }
820
821 while (readl(host->base + S3C2410_SDIDSTA) &
822 (S3C2410_SDIDSTA_TXDATAON | S3C2410_SDIDSTA_RXDATAON)) {
823
824 dbg(host, dbg_err,
825 "mci_setup_data() transfer stillin progress.\n");
826
827 writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
828 s3cmci_reset(host);
829
830 if ((stoptries--) == 0) {
831 dbg_dumpregs(host, "DRF");
832 return -EINVAL;
833 }
834 }
835
836 dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK;
837
838 if (host->dodma)
839 dcon |= S3C2410_SDIDCON_DMAEN;
840
841 if (host->bus_width == MMC_BUS_WIDTH_4)
842 dcon |= S3C2410_SDIDCON_WIDEBUS;
843
844 if (!(data->flags & MMC_DATA_STREAM))
845 dcon |= S3C2410_SDIDCON_BLOCKMODE;
846
847 if (data->flags & MMC_DATA_WRITE) {
848 dcon |= S3C2410_SDIDCON_TXAFTERRESP;
849 dcon |= S3C2410_SDIDCON_XFER_TXSTART;
850 }
851
852 if (data->flags & MMC_DATA_READ) {
853 dcon |= S3C2410_SDIDCON_RXAFTERCMD;
854 dcon |= S3C2410_SDIDCON_XFER_RXSTART;
855 }
856
857 if (host->is2440) {
858 dcon |= S3C2440_SDIDCON_DS_WORD;
859 dcon |= S3C2440_SDIDCON_DATSTART;
860 }
861
862 writel(dcon, host->base + S3C2410_SDIDCON);
863
864 /* write BSIZE register */
865
866 writel(data->blksz, host->base + S3C2410_SDIBSIZE);
867
868 /* add to IMASK register */
869 imsk = S3C2410_SDIIMSK_FIFOFAIL | S3C2410_SDIIMSK_DATACRC |
870 S3C2410_SDIIMSK_DATATIMEOUT | S3C2410_SDIIMSK_DATAFINISH;
871
872 enable_imask(host, imsk);
873
874 /* write TIMER register */
875
876 if (host->is2440) {
877 writel(0x007FFFFF, host->base + S3C2410_SDITIMER);
878 } else {
879 writel(0x0000FFFF, host->base + S3C2410_SDITIMER);
880
881 /* FIX: set slow clock to prevent timeouts on read */
882 if (data->flags & MMC_DATA_READ)
883 writel(0xFF, host->base + S3C2410_SDIPRE);
884 }
885
886 return 0;
887}
888
889#define BOTH_DIR (MMC_DATA_WRITE | MMC_DATA_READ)
890
891static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data)
892{
893 int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
894
895 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
896
897 host->pio_sgptr = 0;
898 host->pio_words = 0;
899 host->pio_count = 0;
900 host->pio_active = rw ? XFER_WRITE : XFER_READ;
901
902 if (rw) {
903 do_pio_write(host);
904 enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
905 } else {
906 enable_imask(host, S3C2410_SDIIMSK_RXFIFOHALF
907 | S3C2410_SDIIMSK_RXFIFOLAST);
908 }
909
910 return 0;
911}
912
913static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
914{
915 int dma_len, i;
916 int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
917
918 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
919
920 s3cmci_dma_setup(host, rw ? S3C2410_DMASRC_MEM : S3C2410_DMASRC_HW);
921 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
922
923 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
924 (rw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
925
926 if (dma_len == 0)
927 return -ENOMEM;
928
929 host->dma_complete = 0;
930 host->dmatogo = dma_len;
931
932 for (i = 0; i < dma_len; i++) {
933 int res;
934
935 dbg(host, dbg_dma, "enqueue %i:%u@%u\n", i,
936 sg_dma_address(&data->sg[i]),
937 sg_dma_len(&data->sg[i]));
938
939 res = s3c2410_dma_enqueue(host->dma, (void *) host,
940 sg_dma_address(&data->sg[i]),
941 sg_dma_len(&data->sg[i]));
942
943 if (res) {
944 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
945 return -EBUSY;
946 }
947 }
948
949 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_START);
950
951 return 0;
952}
953
954static void s3cmci_send_request(struct mmc_host *mmc)
955{
956 struct s3cmci_host *host = mmc_priv(mmc);
957 struct mmc_request *mrq = host->mrq;
958 struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
959
960 host->ccnt++;
961 prepare_dbgmsg(host, cmd, host->cmd_is_stop);
962
963 /* Clear command, data and fifo status registers
964 Fifo clear only necessary on 2440, but doesn't hurt on 2410
965 */
966 writel(0xFFFFFFFF, host->base + S3C2410_SDICMDSTAT);
967 writel(0xFFFFFFFF, host->base + S3C2410_SDIDSTA);
968 writel(0xFFFFFFFF, host->base + S3C2410_SDIFSTA);
969
970 if (cmd->data) {
971 int res = s3cmci_setup_data(host, cmd->data);
972
973 host->dcnt++;
974
975 if (res) {
976 dbg(host, dbg_err, "setup data error %d\n", res);
977 cmd->error = res;
978 cmd->data->error = res;
979
980 mmc_request_done(mmc, mrq);
981 return;
982 }
983
984 if (host->dodma)
985 res = s3cmci_prepare_dma(host, cmd->data);
986 else
987 res = s3cmci_prepare_pio(host, cmd->data);
988
989 if (res) {
990 dbg(host, dbg_err, "data prepare error %d\n", res);
991 cmd->error = res;
992 cmd->data->error = res;
993
994 mmc_request_done(mmc, mrq);
995 return;
996 }
997 }
998
999 /* Send command */
1000 s3cmci_send_command(host, cmd);
1001
1002 /* Enable Interrupt */
1003 enable_irq(host->irq);
1004}
1005
1006static int s3cmci_card_present(struct s3cmci_host *host)
1007{
1008 struct s3c24xx_mci_pdata *pdata = host->pdata;
1009 int ret;
1010
1011 if (pdata->gpio_detect == 0)
1012 return -ENOSYS;
1013
1014 ret = s3c2410_gpio_getpin(pdata->gpio_detect) ? 0 : 1;
1015 return ret ^ pdata->detect_invert;
1016}
1017
1018static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1019{
1020 struct s3cmci_host *host = mmc_priv(mmc);
1021
1022 host->status = "mmc request";
1023 host->cmd_is_stop = 0;
1024 host->mrq = mrq;
1025
1026 if (s3cmci_card_present(host) == 0) {
1027 dbg(host, dbg_err, "%s: no medium present\n", __func__);
1028 host->mrq->cmd->error = -ENOMEDIUM;
1029 mmc_request_done(mmc, mrq);
1030 } else
1031 s3cmci_send_request(mmc);
1032}
1033
1034static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1035{
1036 struct s3cmci_host *host = mmc_priv(mmc);
1037 u32 mci_psc, mci_con;
1038
1039 /* Set the power state */
1040
1041 mci_con = readl(host->base + S3C2410_SDICON);
1042
1043 switch (ios->power_mode) {
1044 case MMC_POWER_ON:
1045 case MMC_POWER_UP:
1046 s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_SDCLK);
1047 s3c2410_gpio_cfgpin(S3C2410_GPE6, S3C2410_GPE6_SDCMD);
1048 s3c2410_gpio_cfgpin(S3C2410_GPE7, S3C2410_GPE7_SDDAT0);
1049 s3c2410_gpio_cfgpin(S3C2410_GPE8, S3C2410_GPE8_SDDAT1);
1050 s3c2410_gpio_cfgpin(S3C2410_GPE9, S3C2410_GPE9_SDDAT2);
1051 s3c2410_gpio_cfgpin(S3C2410_GPE10, S3C2410_GPE10_SDDAT3);
1052
1053 if (host->pdata->set_power)
1054 host->pdata->set_power(ios->power_mode, ios->vdd);
1055
1056 if (!host->is2440)
1057 mci_con |= S3C2410_SDICON_FIFORESET;
1058
1059 break;
1060
1061 case MMC_POWER_OFF:
1062 default:
1063 s3c2410_gpio_setpin(S3C2410_GPE5, 0);
1064 s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_OUTP);
1065
1066 if (host->is2440)
1067 mci_con |= S3C2440_SDICON_SDRESET;
1068
1069 if (host->pdata->set_power)
1070 host->pdata->set_power(ios->power_mode, ios->vdd);
1071
1072 break;
1073 }
1074
1075 /* Set clock */
1076 for (mci_psc = 0; mci_psc < 255; mci_psc++) {
1077 host->real_rate = host->clk_rate / (host->clk_div*(mci_psc+1));
1078
1079 if (host->real_rate <= ios->clock)
1080 break;
1081 }
1082
1083 if (mci_psc > 255)
1084 mci_psc = 255;
1085
1086 host->prescaler = mci_psc;
1087 writel(host->prescaler, host->base + S3C2410_SDIPRE);
1088
1089 /* If requested clock is 0, real_rate will be 0, too */
1090 if (ios->clock == 0)
1091 host->real_rate = 0;
1092
1093 /* Set CLOCK_ENABLE */
1094 if (ios->clock)
1095 mci_con |= S3C2410_SDICON_CLOCKTYPE;
1096 else
1097 mci_con &= ~S3C2410_SDICON_CLOCKTYPE;
1098
1099 writel(mci_con, host->base + S3C2410_SDICON);
1100
1101 if ((ios->power_mode == MMC_POWER_ON) ||
1102 (ios->power_mode == MMC_POWER_UP)) {
1103 dbg(host, dbg_conf, "running at %lukHz (requested: %ukHz).\n",
1104 host->real_rate/1000, ios->clock/1000);
1105 } else {
1106 dbg(host, dbg_conf, "powered down.\n");
1107 }
1108
1109 host->bus_width = ios->bus_width;
1110}
1111
1112static void s3cmci_reset(struct s3cmci_host *host)
1113{
1114 u32 con = readl(host->base + S3C2410_SDICON);
1115
1116 con |= S3C2440_SDICON_SDRESET;
1117 writel(con, host->base + S3C2410_SDICON);
1118}
1119
1120static int s3cmci_get_ro(struct mmc_host *mmc)
1121{
1122 struct s3cmci_host *host = mmc_priv(mmc);
1123 struct s3c24xx_mci_pdata *pdata = host->pdata;
1124 int ret;
1125
1126 if (pdata->gpio_wprotect == 0)
1127 return 0;
1128
1129 ret = s3c2410_gpio_getpin(pdata->gpio_wprotect);
1130
1131 if (pdata->wprotect_invert)
1132 ret = !ret;
1133
1134 return ret;
1135}
1136
1137static struct mmc_host_ops s3cmci_ops = {
1138 .request = s3cmci_request,
1139 .set_ios = s3cmci_set_ios,
1140 .get_ro = s3cmci_get_ro,
1141};
1142
1143static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
1144 /* This is currently here to avoid a number of if (host->pdata)
1145 * checks. Any zero fields to ensure reasonable defaults are picked. */
1146};
1147
1148static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1149{
1150 struct s3cmci_host *host;
1151 struct mmc_host *mmc;
1152 int ret;
1153
1154 mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
1155 if (!mmc) {
1156 ret = -ENOMEM;
1157 goto probe_out;
1158 }
1159
1160 host = mmc_priv(mmc);
1161 host->mmc = mmc;
1162 host->pdev = pdev;
1163 host->is2440 = is2440;
1164
1165 host->pdata = pdev->dev.platform_data;
1166 if (!host->pdata) {
1167 pdev->dev.platform_data = &s3cmci_def_pdata;
1168 host->pdata = &s3cmci_def_pdata;
1169 }
1170
1171 spin_lock_init(&host->complete_lock);
1172 tasklet_init(&host->pio_tasklet, pio_tasklet, (unsigned long) host);
1173
1174 if (is2440) {
1175 host->sdiimsk = S3C2440_SDIIMSK;
1176 host->sdidata = S3C2440_SDIDATA;
1177 host->clk_div = 1;
1178 } else {
1179 host->sdiimsk = S3C2410_SDIIMSK;
1180 host->sdidata = S3C2410_SDIDATA;
1181 host->clk_div = 2;
1182 }
1183
1184 host->dodma = 0;
1185 host->complete_what = COMPLETION_NONE;
1186 host->pio_active = XFER_NONE;
1187
1188 host->dma = S3CMCI_DMA;
1189
1190 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1191 if (!host->mem) {
1192 dev_err(&pdev->dev,
1193 "failed to get io memory region resouce.\n");
1194
1195 ret = -ENOENT;
1196 goto probe_free_host;
1197 }
1198
1199 host->mem = request_mem_region(host->mem->start,
1200 RESSIZE(host->mem), pdev->name);
1201
1202 if (!host->mem) {
1203 dev_err(&pdev->dev, "failed to request io memory region.\n");
1204 ret = -ENOENT;
1205 goto probe_free_host;
1206 }
1207
1208 host->base = ioremap(host->mem->start, RESSIZE(host->mem));
1209 if (host->base == 0) {
1210 dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
1211 ret = -EINVAL;
1212 goto probe_free_mem_region;
1213 }
1214
1215 host->irq = platform_get_irq(pdev, 0);
1216 if (host->irq == 0) {
1217 dev_err(&pdev->dev, "failed to get interrupt resouce.\n");
1218 ret = -EINVAL;
1219 goto probe_iounmap;
1220 }
1221
1222 if (request_irq(host->irq, s3cmci_irq, 0, DRIVER_NAME, host)) {
1223 dev_err(&pdev->dev, "failed to request mci interrupt.\n");
1224 ret = -ENOENT;
1225 goto probe_iounmap;
1226 }
1227
1228 /* We get spurious interrupts even when we have set the IMSK
1229 * register to ignore everything, so use disable_irq() to make
1230 * ensure we don't lock the system with un-serviceable requests. */
1231
1232 disable_irq(host->irq);
1233
1234 host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect);
1235
1236 if (host->irq_cd >= 0) {
1237 if (request_irq(host->irq_cd, s3cmci_irq_cd,
1238 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
1239 DRIVER_NAME, host)) {
1240 dev_err(&pdev->dev, "can't get card detect irq.\n");
1241 ret = -ENOENT;
1242 goto probe_free_irq;
1243 }
1244 } else {
1245 dev_warn(&pdev->dev, "host detect has no irq available\n");
1246 s3c2410_gpio_cfgpin(host->pdata->gpio_detect,
1247 S3C2410_GPIO_INPUT);
1248 }
1249
1250 if (host->pdata->gpio_wprotect)
1251 s3c2410_gpio_cfgpin(host->pdata->gpio_wprotect,
1252 S3C2410_GPIO_INPUT);
1253
1254 if (s3c2410_dma_request(S3CMCI_DMA, &s3cmci_dma_client, NULL) < 0) {
1255 dev_err(&pdev->dev, "unable to get DMA channel.\n");
1256 ret = -EBUSY;
1257 goto probe_free_irq_cd;
1258 }
1259
1260 host->clk = clk_get(&pdev->dev, "sdi");
1261 if (IS_ERR(host->clk)) {
1262 dev_err(&pdev->dev, "failed to find clock source.\n");
1263 ret = PTR_ERR(host->clk);
1264 host->clk = NULL;
1265 goto probe_free_host;
1266 }
1267
1268 ret = clk_enable(host->clk);
1269 if (ret) {
1270 dev_err(&pdev->dev, "failed to enable clock source.\n");
1271 goto clk_free;
1272 }
1273
1274 host->clk_rate = clk_get_rate(host->clk);
1275
1276 mmc->ops = &s3cmci_ops;
1277 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1278 mmc->caps = MMC_CAP_4_BIT_DATA;
1279 mmc->f_min = host->clk_rate / (host->clk_div * 256);
1280 mmc->f_max = host->clk_rate / host->clk_div;
1281
1282 if (host->pdata->ocr_avail)
1283 mmc->ocr_avail = host->pdata->ocr_avail;
1284
1285 mmc->max_blk_count = 4095;
1286 mmc->max_blk_size = 4095;
1287 mmc->max_req_size = 4095 * 512;
1288 mmc->max_seg_size = mmc->max_req_size;
1289
1290 mmc->max_phys_segs = 128;
1291 mmc->max_hw_segs = 128;
1292
1293 dbg(host, dbg_debug,
1294 "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%u.\n",
1295 (host->is2440?"2440":""),
1296 host->base, host->irq, host->irq_cd, host->dma);
1297
1298 ret = mmc_add_host(mmc);
1299 if (ret) {
1300 dev_err(&pdev->dev, "failed to add mmc host.\n");
1301 goto free_dmabuf;
1302 }
1303
1304 platform_set_drvdata(pdev, mmc);
1305 dev_info(&pdev->dev, "initialisation done.\n");
1306
1307 return 0;
1308
1309 free_dmabuf:
1310 clk_disable(host->clk);
1311
1312 clk_free:
1313 clk_put(host->clk);
1314
1315 probe_free_irq_cd:
1316 if (host->irq_cd >= 0)
1317 free_irq(host->irq_cd, host);
1318
1319 probe_free_irq:
1320 free_irq(host->irq, host);
1321
1322 probe_iounmap:
1323 iounmap(host->base);
1324
1325 probe_free_mem_region:
1326 release_mem_region(host->mem->start, RESSIZE(host->mem));
1327
1328 probe_free_host:
1329 mmc_free_host(mmc);
1330 probe_out:
1331 return ret;
1332}
1333
/*
 * s3cmci_remove - tear down a host registered by s3cmci_probe.
 *
 * Releases everything in the reverse order of acquisition: MMC core
 * registration, clock, PIO tasklet, DMA channel, IRQs, register
 * mapping, memory region, and finally the mmc_host allocation.
 */
static int __devexit s3cmci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct s3cmci_host *host = mmc_priv(mmc);

	mmc_remove_host(mmc);

	clk_disable(host->clk);
	clk_put(host->clk);

	tasklet_disable(&host->pio_tasklet);
	s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client);

	/* irq_cd is negative when no card-detect IRQ was available */
	if (host->irq_cd >= 0)
		free_irq(host->irq_cd, host);
	free_irq(host->irq, host);

	iounmap(host->base);
	release_mem_region(host->mem->start, RESSIZE(host->mem));

	mmc_free_host(mmc);
	return 0;
}
1357
/* Probe wrapper for the S3C2410 variant (2410-style register layout). */
static int __devinit s3cmci_probe_2410(struct platform_device *dev)
{
	return s3cmci_probe(dev, 0);
}
1362
/* Probe wrapper for the S3C2412; passes is2440 = 1, presumably because
 * the 2412 SDI block is register-compatible with the 2440 — confirm
 * against the SoC documentation. */
static int __devinit s3cmci_probe_2412(struct platform_device *dev)
{
	return s3cmci_probe(dev, 1);
}
1367
/* Probe wrapper for the S3C2440 variant (2440-style register layout). */
static int __devinit s3cmci_probe_2440(struct platform_device *dev)
{
	return s3cmci_probe(dev, 1);
}
1372
1373#ifdef CONFIG_PM
1374
/* Forward system suspend to the MMC core for this host. */
static int s3cmci_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);

	return mmc_suspend_host(mmc, state);
}
1381
/* Forward system resume to the MMC core for this host. */
static int s3cmci_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);

	return mmc_resume_host(mmc);
}
1388
1389#else /* CONFIG_PM */
1390#define s3cmci_suspend NULL
1391#define s3cmci_resume NULL
1392#endif /* CONFIG_PM */
1393
1394
/* Platform driver binding for "s3c2410-sdi" devices. */
static struct platform_driver s3cmci_driver_2410 = {
	.driver.name	= "s3c2410-sdi",
	.driver.owner	= THIS_MODULE,
	.probe		= s3cmci_probe_2410,
	.remove		= __devexit_p(s3cmci_remove),
	.suspend	= s3cmci_suspend,
	.resume		= s3cmci_resume,
};
1403
/* Platform driver binding for "s3c2412-sdi" devices. */
static struct platform_driver s3cmci_driver_2412 = {
	.driver.name	= "s3c2412-sdi",
	.driver.owner	= THIS_MODULE,
	.probe		= s3cmci_probe_2412,
	.remove		= __devexit_p(s3cmci_remove),
	.suspend	= s3cmci_suspend,
	.resume		= s3cmci_resume,
};
1412
/* Platform driver binding for "s3c2440-sdi" devices. */
static struct platform_driver s3cmci_driver_2440 = {
	.driver.name	= "s3c2440-sdi",
	.driver.owner	= THIS_MODULE,
	.probe		= s3cmci_probe_2440,
	.remove		= __devexit_p(s3cmci_remove),
	.suspend	= s3cmci_suspend,
	.resume		= s3cmci_resume,
};
1421
1422
1423static int __init s3cmci_init(void)
1424{
1425 platform_driver_register(&s3cmci_driver_2410);
1426 platform_driver_register(&s3cmci_driver_2412);
1427 platform_driver_register(&s3cmci_driver_2440);
1428 return 0;
1429}
1430
1431static void __exit s3cmci_exit(void)
1432{
1433 platform_driver_unregister(&s3cmci_driver_2410);
1434 platform_driver_unregister(&s3cmci_driver_2412);
1435 platform_driver_unregister(&s3cmci_driver_2440);
1436}
1437
1438module_init(s3cmci_init);
1439module_exit(s3cmci_exit);
1440
1441MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");
1442MODULE_LICENSE("GPL v2");
1443MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>");
1444MODULE_ALIAS("platform:s3c2410-sdi");
1445MODULE_ALIAS("platform:s3c2412-sdi");
1446MODULE_ALIAS("platform:s3c2440-sdi");
diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h
new file mode 100644
index 000000000000..37d9c60010c9
--- /dev/null
+++ b/drivers/mmc/host/s3cmci.h
@@ -0,0 +1,70 @@
1/*
2 * linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver
3 *
4 * Copyright (C) 2004-2006 Thomas Kleffel, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11/* FIXME: DMA Resource management ?! */
12#define S3CMCI_DMA 0
13
/* Values for s3cmci_host.complete_what: what event the driver is
 * currently waiting on before the request can be completed.  (Exact
 * transition semantics live in the IRQ/tasklet code, not shown here.) */
enum s3cmci_waitfor {
	COMPLETION_NONE,
	COMPLETION_FINALIZE,
	COMPLETION_CMDSENT,
	COMPLETION_RSPFIN,
	COMPLETION_XFERFINISH,
	COMPLETION_XFERFINISH_RSPFIN,
};
22
/* Per-controller driver state, allocated as the mmc_priv() area of the
 * mmc_host in s3cmci_probe(). */
struct s3cmci_host {
	struct platform_device	*pdev;
	struct s3c24xx_mci_pdata *pdata;	/* board config (or defaults) */
	struct mmc_host		*mmc;
	struct resource		*mem;		/* register memory region */
	struct clk		*clk;		/* "sdi" clock */
	void __iomem		*base;		/* ioremapped registers */
	int			irq;		/* controller IRQ */
	int			irq_cd;		/* card-detect IRQ, <0 if none */
	int			dma;		/* DMA channel (S3CMCI_DMA) */

	unsigned long		clk_rate;	/* input clock rate in Hz */
	unsigned long		clk_div;	/* base divider: 1 (2440) or 2 */
	unsigned long		real_rate;
	u8			prescaler;

	int			is2440;		/* 2440-style register layout */
	unsigned		sdiimsk;	/* IMSK register offset */
	unsigned		sdidata;	/* DATA register offset */
	int			dodma;		/* use DMA for this transfer */
	int			dmatogo;

	struct mmc_request	*mrq;		/* request in flight */
	int			cmd_is_stop;

	spinlock_t		complete_lock;
	enum s3cmci_waitfor	complete_what;	/* event being waited on */

	int			dma_complete;

	/* PIO bookkeeping: current scatterlist entry and word counts */
	u32			pio_sgptr;
	u32			pio_words;
	u32			pio_count;
	u32			*pio_ptr;
#define XFER_NONE 0
#define XFER_READ 1
#define XFER_WRITE 2
	u32			pio_active;	/* XFER_* direction */

	int			bus_width;	/* cached from set_ios */

	/* debug message buffers and counters */
	char			dbgmsg_cmd[301];
	char			dbgmsg_dat[301];
	char			*status;

	unsigned int		ccnt, dcnt;
	struct tasklet_struct	pio_tasklet;	/* runs pio_tasklet() */
};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
new file mode 100644
index 000000000000..deb607c52c0d
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -0,0 +1,732 @@
1/* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface
2 *
3 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
9 *
10 * Thanks to the following companies for their support:
11 *
12 * - JMicron (hardware and technical support)
13 */
14
15#include <linux/delay.h>
16#include <linux/highmem.h>
17#include <linux/pci.h>
18#include <linux/dma-mapping.h>
19
20#include <linux/mmc/host.h>
21
22#include <asm/scatterlist.h>
23#include <asm/io.h>
24
25#include "sdhci.h"
26
27/*
28 * PCI registers
29 */
30
31#define PCI_SDHCI_IFPIO 0x00
32#define PCI_SDHCI_IFDMA 0x01
33#define PCI_SDHCI_IFVENDOR 0x02
34
35#define PCI_SLOT_INFO 0x40 /* 8 bits */
36#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7)
37#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07
38
39#define MAX_SLOTS 8
40
struct sdhci_pci_chip;
struct sdhci_pci_slot;

/* Per-device quirk/fixup table, attached via pci_device_id.driver_data.
 * All hooks are optional (NULL-checked at each call site). */
struct sdhci_pci_fixes {
	unsigned int		quirks;		/* SDHCI_QUIRK_* to apply */

	int			(*probe)(struct sdhci_pci_chip*);

	int			(*probe_slot)(struct sdhci_pci_slot*);
	void			(*remove_slot)(struct sdhci_pci_slot*, int);

	int			(*suspend)(struct sdhci_pci_chip*,
				pm_message_t);
	int			(*resume)(struct sdhci_pci_chip*);
};

/* One SD slot (= one BAR) of a PCI SDHCI controller. */
struct sdhci_pci_slot {
	struct sdhci_pci_chip	*chip;
	struct sdhci_host	*host;

	int			pci_bar;	/* BAR backing this slot */
};

/* One PCI function; may expose several slots. */
struct sdhci_pci_chip {
	struct pci_dev		*pdev;

	unsigned int		quirks;
	const struct sdhci_pci_fixes *fixes;

	int			num_slots;	/* Slots on controller */
	struct sdhci_pci_slot	*slots[MAX_SLOTS]; /* Pointers to host slots */
};
73
74
75/*****************************************************************************\
76 * *
77 * Hardware specific quirk handling *
78 * *
79\*****************************************************************************/
80
81static int ricoh_probe(struct sdhci_pci_chip *chip)
82{
83 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
84 chip->quirks |= SDHCI_QUIRK_CLOCK_BEFORE_RESET;
85
86 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)
87 chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
88
89 return 0;
90}
91
/* Ricoh R5C822: 32-bit DMA addressing only; more quirks added per
 * subsystem vendor in ricoh_probe(). */
static const struct sdhci_pci_fixes sdhci_ricoh = {
	.probe		= ricoh_probe,
	.quirks		= SDHCI_QUIRK_32BIT_DMA_ADDR,
};

/* ENE CB712: broken DMA engine, power register needs single writes. */
static const struct sdhci_pci_fixes sdhci_ene_712 = {
	.quirks	= SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_BROKEN_DMA,
};

/* ENE CB714: as CB712, plus CMD/DATA reset on every ios change. */
static const struct sdhci_pci_fixes sdhci_ene_714 = {
	.quirks	= SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
		  SDHCI_QUIRK_BROKEN_DMA,
};

/* Marvell CAFE: no simultaneous VDD/power writes, bad timeout values. */
static const struct sdhci_pci_fixes sdhci_cafe = {
	.quirks	= SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
		  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
};
112
113static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
114{
115 u8 scratch;
116 int ret;
117
118 ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
119 if (ret)
120 return ret;
121
122 /*
123 * Turn PMOS on [bit 0], set over current detection to 2.4 V
124 * [bit 1:2] and enable over current debouncing [bit 6].
125 */
126 if (on)
127 scratch |= 0x47;
128 else
129 scratch &= ~0x47;
130
131 ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
132 if (ret)
133 return ret;
134
135 return 0;
136}
137
/*
 * Chip-level setup for JMicron JMB38X controllers: apply revision-0
 * DMA quirks, refuse to bind the secondary (SD) interface when the
 * primary (MMC) interface of the same device is present, and enable
 * the card power output pins.
 */
static int jmicron_probe(struct sdhci_pci_chip *chip)
{
	int ret;

	/* first silicon revision has several 32-bit-only DMA limitations */
	if (chip->pdev->revision == 0) {
		chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
			  SDHCI_QUIRK_32BIT_DMA_SIZE |
			  SDHCI_QUIRK_32BIT_ADMA_SIZE |
			  SDHCI_QUIRK_RESET_AFTER_REQUEST;
	}

	/*
	 * JMicron chips can have two interfaces to the same hardware
	 * in order to work around limitations in Microsoft's driver.
	 * We need to make sure we only bind to one of them.
	 *
	 * This code assumes two things:
	 *
	 * 1. The PCI code adds subfunctions in order.
	 *
	 * 2. The MMC interface has a lower subfunction number
	 *    than the SD interface.
	 */
	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) {
		struct pci_dev *sd_dev;

		/* NOTE: despite the name, sd_dev iterates the MMC
		 * function; we are probing the SD function here. */
		sd_dev = NULL;
		while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
			PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) {
			if ((PCI_SLOT(chip->pdev->devfn) ==
				PCI_SLOT(sd_dev->devfn)) &&
				(chip->pdev->bus == sd_dev->bus))
				break;
		}

		if (sd_dev) {
			pci_dev_put(sd_dev);
			dev_info(&chip->pdev->dev, "Refusing to bind to "
				"secondary interface.\n");
			return -ENODEV;
		}
	}

	/*
	 * JMicron chips need a bit of a nudge to enable the power
	 * output pins.
	 */
	ret = jmicron_pmos(chip, 1);
	if (ret) {
		dev_err(&chip->pdev->dev, "Failure enabling card power\n");
		return ret;
	}

	return 0;
}
193
194static void jmicron_enable_mmc(struct sdhci_host *host, int on)
195{
196 u8 scratch;
197
198 scratch = readb(host->ioaddr + 0xC0);
199
200 if (on)
201 scratch |= 0x01;
202 else
203 scratch &= ~0x01;
204
205 writeb(scratch, host->ioaddr + 0xC0);
206}
207
/*
 * Per-slot setup for JMicron: disable ADMA on early revision-0
 * silicon and turn on interrupts for the secondary (MMC) interface.
 */
static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
{
	if (slot->chip->pdev->revision == 0) {
		u16 version;

		version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
		version = (version & SDHCI_VENDOR_VER_MASK) >>
			SDHCI_VENDOR_VER_SHIFT;

		/*
		 * Older versions of the chip have lots of nasty glitches
		 * in the ADMA engine. It's best just to avoid it
		 * completely.
		 */
		if (version < 0xAC)
			slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
	}

	/*
	 * The secondary interface requires a bit set to get the
	 * interrupts.
	 */
	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
		jmicron_enable_mmc(slot->host, 1);

	return 0;
}
235
236static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
237{
238 if (dead)
239 return;
240
241 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
242 jmicron_enable_mmc(slot->host, 0);
243}
244
245static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
246{
247 int i;
248
249 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
250 for (i = 0;i < chip->num_slots;i++)
251 jmicron_enable_mmc(chip->slots[i]->host, 0);
252 }
253
254 return 0;
255}
256
/* Re-enable the MMC interface slots and the power-MOS after resume. */
static int jmicron_resume(struct sdhci_pci_chip *chip)
{
	int ret, i;

	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
		for (i = 0;i < chip->num_slots;i++)
			jmicron_enable_mmc(chip->slots[i]->host, 1);
	}

	/* power output pins are lost over suspend; nudge them back on */
	ret = jmicron_pmos(chip, 1);
	if (ret) {
		dev_err(&chip->pdev->dev, "Failure enabling card power\n");
		return ret;
	}

	return 0;
}
274
/* JMicron JMB38X: full set of chip/slot/PM fixup hooks. */
static const struct sdhci_pci_fixes sdhci_jmicron = {
	.probe		= jmicron_probe,

	.probe_slot	= jmicron_probe_slot,
	.remove_slot	= jmicron_remove_slot,

	.suspend	= jmicron_suspend,
	.resume		= jmicron_resume,
};
284
285static const struct pci_device_id pci_ids[] __devinitdata = {
286 {
287 .vendor = PCI_VENDOR_ID_RICOH,
288 .device = PCI_DEVICE_ID_RICOH_R5C822,
289 .subvendor = PCI_ANY_ID,
290 .subdevice = PCI_ANY_ID,
291 .driver_data = (kernel_ulong_t)&sdhci_ricoh,
292 },
293
294 {
295 .vendor = PCI_VENDOR_ID_ENE,
296 .device = PCI_DEVICE_ID_ENE_CB712_SD,
297 .subvendor = PCI_ANY_ID,
298 .subdevice = PCI_ANY_ID,
299 .driver_data = (kernel_ulong_t)&sdhci_ene_712,
300 },
301
302 {
303 .vendor = PCI_VENDOR_ID_ENE,
304 .device = PCI_DEVICE_ID_ENE_CB712_SD_2,
305 .subvendor = PCI_ANY_ID,
306 .subdevice = PCI_ANY_ID,
307 .driver_data = (kernel_ulong_t)&sdhci_ene_712,
308 },
309
310 {
311 .vendor = PCI_VENDOR_ID_ENE,
312 .device = PCI_DEVICE_ID_ENE_CB714_SD,
313 .subvendor = PCI_ANY_ID,
314 .subdevice = PCI_ANY_ID,
315 .driver_data = (kernel_ulong_t)&sdhci_ene_714,
316 },
317
318 {
319 .vendor = PCI_VENDOR_ID_ENE,
320 .device = PCI_DEVICE_ID_ENE_CB714_SD_2,
321 .subvendor = PCI_ANY_ID,
322 .subdevice = PCI_ANY_ID,
323 .driver_data = (kernel_ulong_t)&sdhci_ene_714,
324 },
325
326 {
327 .vendor = PCI_VENDOR_ID_MARVELL,
328 .device = PCI_DEVICE_ID_MARVELL_CAFE_SD,
329 .subvendor = PCI_ANY_ID,
330 .subdevice = PCI_ANY_ID,
331 .driver_data = (kernel_ulong_t)&sdhci_cafe,
332 },
333
334 {
335 .vendor = PCI_VENDOR_ID_JMICRON,
336 .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD,
337 .subvendor = PCI_ANY_ID,
338 .subdevice = PCI_ANY_ID,
339 .driver_data = (kernel_ulong_t)&sdhci_jmicron,
340 },
341
342 {
343 .vendor = PCI_VENDOR_ID_JMICRON,
344 .device = PCI_DEVICE_ID_JMICRON_JMB38X_MMC,
345 .subvendor = PCI_ANY_ID,
346 .subdevice = PCI_ANY_ID,
347 .driver_data = (kernel_ulong_t)&sdhci_jmicron,
348 },
349
350 { /* Generic SD host controller */
351 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
352 },
353
354 { /* end: all zeroes */ },
355};
356
357MODULE_DEVICE_TABLE(pci, pci_ids);
358
359/*****************************************************************************\
360 * *
361 * SDHCI core callbacks *
362 * *
363\*****************************************************************************/
364
/*
 * SDHCI core callback: prepare the PCI device for bus-master DMA.
 * Warns when the class code's programming interface does not advertise
 * DMA but the core decided to use it anyway, then sets a 32-bit DMA
 * mask and enables bus mastering.
 */
static int sdhci_pci_enable_dma(struct sdhci_host *host)
{
	struct sdhci_pci_slot *slot;
	struct pci_dev *pdev;
	int ret;

	slot = sdhci_priv(host);
	pdev = slot->chip->pdev;

	if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
		((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
		(host->flags & SDHCI_USE_DMA)) {
		dev_warn(&pdev->dev, "Will use DMA mode even though HW "
			"doesn't fully claim to support it.\n");
	}

	ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (ret)
		return ret;

	pci_set_master(pdev);

	return 0;
}
389
/* Callbacks handed to the SDHCI core; only DMA setup is PCI-specific. */
static struct sdhci_ops sdhci_pci_ops = {
	.enable_dma	= sdhci_pci_enable_dma,
};
393
394/*****************************************************************************\
395 * *
396 * Suspend/resume *
397 * *
398\*****************************************************************************/
399
400#ifdef CONFIG_PM
401
/*
 * PCI suspend: suspend every slot, run the chip-specific suspend hook,
 * then power the PCI device down.  On any failure, every slot that was
 * already suspended is resumed again before returning the error.
 */
static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct sdhci_pci_chip *chip;
	struct sdhci_pci_slot *slot;
	int i, ret;

	chip = pci_get_drvdata(pdev);
	if (!chip)
		return 0;

	for (i = 0;i < chip->num_slots;i++) {
		slot = chip->slots[i];
		if (!slot)
			continue;

		ret = sdhci_suspend_host(slot->host, state);

		if (ret) {
			/* roll back the slots suspended so far */
			for (i--;i >= 0;i--)
				sdhci_resume_host(chip->slots[i]->host);
			return ret;
		}
	}

	if (chip->fixes && chip->fixes->suspend) {
		ret = chip->fixes->suspend(chip, state);
		if (ret) {
			for (i = chip->num_slots - 1;i >= 0;i--)
				sdhci_resume_host(chip->slots[i]->host);
			return ret;
		}
	}

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
442
/*
 * PCI resume: restore PCI state and power, run the chip-specific
 * resume hook, then resume every slot.  Mirrors sdhci_pci_suspend().
 */
static int sdhci_pci_resume (struct pci_dev *pdev)
{
	struct sdhci_pci_chip *chip;
	struct sdhci_pci_slot *slot;
	int i, ret;

	chip = pci_get_drvdata(pdev);
	if (!chip)
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* chip-level fixups (e.g. JMicron power pins) before the slots */
	if (chip->fixes && chip->fixes->resume) {
		ret = chip->fixes->resume(chip);
		if (ret)
			return ret;
	}

	for (i = 0;i < chip->num_slots;i++) {
		slot = chip->slots[i];
		if (!slot)
			continue;

		ret = sdhci_resume_host(slot->host);
		if (ret)
			return ret;
	}

	return 0;
}
477
478#else /* CONFIG_PM */
479
480#define sdhci_pci_suspend NULL
481#define sdhci_pci_resume NULL
482
483#endif /* CONFIG_PM */
484
485/*****************************************************************************\
486 * *
487 * Device probing/removal *
488 * *
489\*****************************************************************************/
490
491static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
492 struct pci_dev *pdev, struct sdhci_pci_chip *chip, int bar)
493{
494 struct sdhci_pci_slot *slot;
495 struct sdhci_host *host;
496
497 resource_size_t addr;
498
499 int ret;
500
501 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
502 dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
503 return ERR_PTR(-ENODEV);
504 }
505
506 if (pci_resource_len(pdev, bar) != 0x100) {
507 dev_err(&pdev->dev, "Invalid iomem size. You may "
508 "experience problems.\n");
509 }
510
511 if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
512 dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
513 return ERR_PTR(-ENODEV);
514 }
515
516 if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
517 dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
518 return ERR_PTR(-ENODEV);
519 }
520
521 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot));
522 if (IS_ERR(host)) {
523 ret = PTR_ERR(host);
524 goto unmap;
525 }
526
527 slot = sdhci_priv(host);
528
529 slot->chip = chip;
530 slot->host = host;
531 slot->pci_bar = bar;
532
533 host->hw_name = "PCI";
534 host->ops = &sdhci_pci_ops;
535 host->quirks = chip->quirks;
536
537 host->irq = pdev->irq;
538
539 ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc));
540 if (ret) {
541 dev_err(&pdev->dev, "cannot request region\n");
542 return ERR_PTR(ret);
543 }
544
545 addr = pci_resource_start(pdev, bar);
546 host->ioaddr = ioremap_nocache(addr, pci_resource_len(pdev, bar));
547 if (!host->ioaddr) {
548 dev_err(&pdev->dev, "failed to remap registers\n");
549 goto release;
550 }
551
552 if (chip->fixes && chip->fixes->probe_slot) {
553 ret = chip->fixes->probe_slot(slot);
554 if (ret)
555 goto unmap;
556 }
557
558 ret = sdhci_add_host(host);
559 if (ret)
560 goto remove;
561
562 return slot;
563
564remove:
565 if (chip->fixes && chip->fixes->remove_slot)
566 chip->fixes->remove_slot(slot, 0);
567
568unmap:
569 iounmap(host->ioaddr);
570
571release:
572 pci_release_region(pdev, bar);
573 sdhci_free_host(host);
574
575 return ERR_PTR(ret);
576}
577
/*
 * Tear down one slot.  A register read of all-ones is taken to mean
 * the controller has gone away (e.g. surprise removal); the "dead"
 * flag is forwarded so teardown skips hardware access.
 */
static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
{
	int dead;
	u32 scratch;

	dead = 0;
	scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
	if (scratch == (u32)-1)
		dead = 1;

	sdhci_remove_host(slot->host, dead);

	if (slot->chip->fixes && slot->chip->fixes->remove_slot)
		slot->chip->fixes->remove_slot(slot, dead);

	pci_release_region(slot->chip->pdev, slot->pci_bar);

	sdhci_free_host(slot->host);
}
597
/*
 * PCI probe: read the slot-info register, enable the device, allocate
 * the chip structure, apply per-device fixups, and probe every slot.
 * On slot failure, all previously probed slots are removed again.
 */
static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct sdhci_pci_chip *chip;
	struct sdhci_pci_slot *slot;

	u8 slots, rev, first_bar;
	int ret, i;

	BUG_ON(pdev == NULL);
	BUG_ON(ent == NULL);

	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);

	dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
		 (int)pdev->vendor, (int)pdev->device, (int)rev);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
	if (ret)
		return ret;

	/* slot count is encoded as (n - 1) in bits 6:4 of PCI_SLOT_INFO */
	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
	dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
	if (slots == 0)
		return -ENODEV;

	BUG_ON(slots > MAX_SLOTS);

	/* PCI_SLOT_INFO is read a second time here; bits 2:0 hold the
	 * first BAR used by the slots */
	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
	if (ret)
		return ret;

	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;

	if (first_bar > 5) {
		dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
		return -ENODEV;
	}

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	chip = kzalloc(sizeof(struct sdhci_pci_chip), GFP_KERNEL);
	if (!chip) {
		ret = -ENOMEM;
		goto err;
	}

	chip->pdev = pdev;
	chip->fixes = (const struct sdhci_pci_fixes*)ent->driver_data;
	if (chip->fixes)
		chip->quirks = chip->fixes->quirks;
	chip->num_slots = slots;

	pci_set_drvdata(pdev, chip);

	if (chip->fixes && chip->fixes->probe) {
		ret = chip->fixes->probe(chip);
		if (ret)
			goto free;
	}

	for (i = 0;i < slots;i++) {
		slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i);
		if (IS_ERR(slot)) {
			/* unwind the slots probed so far */
			for (i--;i >= 0;i--)
				sdhci_pci_remove_slot(chip->slots[i]);
			ret = PTR_ERR(slot);
			goto free;
		}

		chip->slots[i] = slot;
	}

	return 0;

free:
	pci_set_drvdata(pdev, NULL);
	kfree(chip);

err:
	pci_disable_device(pdev);
	return ret;
}
683
684static void __devexit sdhci_pci_remove(struct pci_dev *pdev)
685{
686 int i;
687 struct sdhci_pci_chip *chip;
688
689 chip = pci_get_drvdata(pdev);
690
691 if (chip) {
692 for (i = 0;i < chip->num_slots; i++)
693 sdhci_pci_remove_slot(chip->slots[i]);
694
695 pci_set_drvdata(pdev, NULL);
696 kfree(chip);
697 }
698
699 pci_disable_device(pdev);
700}
701
/* PCI driver glue; pci_ids carries the per-device fixes tables. */
static struct pci_driver sdhci_driver = {
	.name = 	"sdhci-pci",
	.id_table =	pci_ids,
	.probe = 	sdhci_pci_probe,
	.remove =	__devexit_p(sdhci_pci_remove),
	.suspend =	sdhci_pci_suspend,
	.resume	=	sdhci_pci_resume,
};
710
711/*****************************************************************************\
712 * *
713 * Driver init/exit *
714 * *
715\*****************************************************************************/
716
717static int __init sdhci_drv_init(void)
718{
719 return pci_register_driver(&sdhci_driver);
720}
721
722static void __exit sdhci_drv_exit(void)
723{
724 pci_unregister_driver(&sdhci_driver);
725}
726
727module_init(sdhci_drv_init);
728module_exit(sdhci_drv_exit);
729
730MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
731MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
732MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index b413aa6c246b..17701c3da733 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -15,7 +15,7 @@
15 15
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/highmem.h> 17#include <linux/highmem.h>
18#include <linux/pci.h> 18#include <linux/io.h>
19#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
20#include <linux/scatterlist.h> 20#include <linux/scatterlist.h>
21 21
@@ -32,135 +32,6 @@
32 32
33static unsigned int debug_quirks = 0; 33static unsigned int debug_quirks = 0;
34 34
35/*
36 * Different quirks to handle when the hardware deviates from a strict
37 * interpretation of the SDHCI specification.
38 */
39
40/* Controller doesn't honor resets unless we touch the clock register */
41#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
42/* Controller has bad caps bits, but really supports DMA */
43#define SDHCI_QUIRK_FORCE_DMA (1<<1)
44/* Controller doesn't like to be reset when there is no card inserted. */
45#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
46/* Controller doesn't like clearing the power reg before a change */
47#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
48/* Controller has flaky internal state so reset it on each ios change */
49#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
50/* Controller has an unusable DMA engine */
51#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
52/* Controller can only DMA from 32-bit aligned addresses */
53#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<6)
54/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
55#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<7)
56/* Controller needs to be reset after each request to stay stable */
57#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<8)
58/* Controller needs voltage and power writes to happen separately */
59#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<9)
60/* Controller has an off-by-one issue with timeout value */
61#define SDHCI_QUIRK_INCR_TIMEOUT_CONTROL (1<<10)
62
63static const struct pci_device_id pci_ids[] __devinitdata = {
64 {
65 .vendor = PCI_VENDOR_ID_RICOH,
66 .device = PCI_DEVICE_ID_RICOH_R5C822,
67 .subvendor = PCI_VENDOR_ID_IBM,
68 .subdevice = PCI_ANY_ID,
69 .driver_data = SDHCI_QUIRK_CLOCK_BEFORE_RESET |
70 SDHCI_QUIRK_FORCE_DMA,
71 },
72
73 {
74 .vendor = PCI_VENDOR_ID_RICOH,
75 .device = PCI_DEVICE_ID_RICOH_R5C822,
76 .subvendor = PCI_VENDOR_ID_SAMSUNG,
77 .subdevice = PCI_ANY_ID,
78 .driver_data = SDHCI_QUIRK_FORCE_DMA |
79 SDHCI_QUIRK_NO_CARD_NO_RESET,
80 },
81
82 {
83 .vendor = PCI_VENDOR_ID_RICOH,
84 .device = PCI_DEVICE_ID_RICOH_R5C822,
85 .subvendor = PCI_ANY_ID,
86 .subdevice = PCI_ANY_ID,
87 .driver_data = SDHCI_QUIRK_FORCE_DMA,
88 },
89
90 {
91 .vendor = PCI_VENDOR_ID_TI,
92 .device = PCI_DEVICE_ID_TI_XX21_XX11_SD,
93 .subvendor = PCI_ANY_ID,
94 .subdevice = PCI_ANY_ID,
95 .driver_data = SDHCI_QUIRK_FORCE_DMA,
96 },
97
98 {
99 .vendor = PCI_VENDOR_ID_ENE,
100 .device = PCI_DEVICE_ID_ENE_CB712_SD,
101 .subvendor = PCI_ANY_ID,
102 .subdevice = PCI_ANY_ID,
103 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
104 SDHCI_QUIRK_BROKEN_DMA,
105 },
106
107 {
108 .vendor = PCI_VENDOR_ID_ENE,
109 .device = PCI_DEVICE_ID_ENE_CB712_SD_2,
110 .subvendor = PCI_ANY_ID,
111 .subdevice = PCI_ANY_ID,
112 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
113 SDHCI_QUIRK_BROKEN_DMA,
114 },
115
116 {
117 .vendor = PCI_VENDOR_ID_ENE,
118 .device = PCI_DEVICE_ID_ENE_CB714_SD,
119 .subvendor = PCI_ANY_ID,
120 .subdevice = PCI_ANY_ID,
121 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
122 SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
123 SDHCI_QUIRK_BROKEN_DMA,
124 },
125
126 {
127 .vendor = PCI_VENDOR_ID_ENE,
128 .device = PCI_DEVICE_ID_ENE_CB714_SD_2,
129 .subvendor = PCI_ANY_ID,
130 .subdevice = PCI_ANY_ID,
131 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
132 SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
133 SDHCI_QUIRK_BROKEN_DMA,
134 },
135
136 {
137 .vendor = PCI_VENDOR_ID_MARVELL,
138 .device = PCI_DEVICE_ID_MARVELL_CAFE_SD,
139 .subvendor = PCI_ANY_ID,
140 .subdevice = PCI_ANY_ID,
141 .driver_data = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
142 SDHCI_QUIRK_INCR_TIMEOUT_CONTROL,
143 },
144
145 {
146 .vendor = PCI_VENDOR_ID_JMICRON,
147 .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD,
148 .subvendor = PCI_ANY_ID,
149 .subdevice = PCI_ANY_ID,
150 .driver_data = SDHCI_QUIRK_32BIT_DMA_ADDR |
151 SDHCI_QUIRK_32BIT_DMA_SIZE |
152 SDHCI_QUIRK_RESET_AFTER_REQUEST,
153 },
154
155 { /* Generic SD host controller */
156 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
157 },
158
159 { /* end: all zeroes */ },
160};
161
162MODULE_DEVICE_TABLE(pci, pci_ids);
163
164static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *); 35static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
165static void sdhci_finish_data(struct sdhci_host *); 36static void sdhci_finish_data(struct sdhci_host *);
166 37
@@ -215,7 +86,7 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
215{ 86{
216 unsigned long timeout; 87 unsigned long timeout;
217 88
218 if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 89 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
219 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & 90 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
220 SDHCI_CARD_PRESENT)) 91 SDHCI_CARD_PRESENT))
221 return; 92 return;
@@ -253,7 +124,8 @@ static void sdhci_init(struct sdhci_host *host)
253 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | 124 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
254 SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT | 125 SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
255 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | 126 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
256 SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE; 127 SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
128 SDHCI_INT_ADMA_ERROR;
257 129
258 writel(intmask, host->ioaddr + SDHCI_INT_ENABLE); 130 writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
259 writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE); 131 writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
@@ -443,23 +315,226 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
443 DBG("PIO transfer complete.\n"); 315 DBG("PIO transfer complete.\n");
444} 316}
445 317
446static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) 318static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
447{ 319{
448 u8 count; 320 local_irq_save(*flags);
449 unsigned target_timeout, current_timeout; 321 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
322}
450 323
451 WARN_ON(host->data); 324static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
325{
326 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
327 local_irq_restore(*flags);
328}
452 329
453 if (data == NULL) 330static int sdhci_adma_table_pre(struct sdhci_host *host,
454 return; 331 struct mmc_data *data)
332{
333 int direction;
455 334
456 /* Sanity checks */ 335 u8 *desc;
457 BUG_ON(data->blksz * data->blocks > 524288); 336 u8 *align;
458 BUG_ON(data->blksz > host->mmc->max_blk_size); 337 dma_addr_t addr;
459 BUG_ON(data->blocks > 65535); 338 dma_addr_t align_addr;
339 int len, offset;
460 340
461 host->data = data; 341 struct scatterlist *sg;
462 host->data_early = 0; 342 int i;
343 char *buffer;
344 unsigned long flags;
345
346 /*
347 * The spec does not specify endianness of descriptor table.
348 * We currently guess that it is LE.
349 */
350
351 if (data->flags & MMC_DATA_READ)
352 direction = DMA_FROM_DEVICE;
353 else
354 direction = DMA_TO_DEVICE;
355
356 /*
357 * The ADMA descriptor table is mapped further down as we
358 * need to fill it with data first.
359 */
360
361 host->align_addr = dma_map_single(mmc_dev(host->mmc),
362 host->align_buffer, 128 * 4, direction);
363 if (dma_mapping_error(host->align_addr))
364 goto fail;
365 BUG_ON(host->align_addr & 0x3);
366
367 host->sg_count = dma_map_sg(mmc_dev(host->mmc),
368 data->sg, data->sg_len, direction);
369 if (host->sg_count == 0)
370 goto unmap_align;
371
372 desc = host->adma_desc;
373 align = host->align_buffer;
374
375 align_addr = host->align_addr;
376
377 for_each_sg(data->sg, sg, host->sg_count, i) {
378 addr = sg_dma_address(sg);
379 len = sg_dma_len(sg);
380
381 /*
382 * The SDHCI specification states that ADMA
383 * addresses must be 32-bit aligned. If they
384 * aren't, then we use a bounce buffer for
385 * the (up to three) bytes that screw up the
386 * alignment.
387 */
388 offset = (4 - (addr & 0x3)) & 0x3;
389 if (offset) {
390 if (data->flags & MMC_DATA_WRITE) {
391 buffer = sdhci_kmap_atomic(sg, &flags);
392 memcpy(align, buffer, offset);
393 sdhci_kunmap_atomic(buffer, &flags);
394 }
395
396 desc[7] = (align_addr >> 24) & 0xff;
397 desc[6] = (align_addr >> 16) & 0xff;
398 desc[5] = (align_addr >> 8) & 0xff;
399 desc[4] = (align_addr >> 0) & 0xff;
400
401 BUG_ON(offset > 65536);
402
403 desc[3] = (offset >> 8) & 0xff;
404 desc[2] = (offset >> 0) & 0xff;
405
406 desc[1] = 0x00;
407 desc[0] = 0x21; /* tran, valid */
408
409 align += 4;
410 align_addr += 4;
411
412 desc += 8;
413
414 addr += offset;
415 len -= offset;
416 }
417
418 desc[7] = (addr >> 24) & 0xff;
419 desc[6] = (addr >> 16) & 0xff;
420 desc[5] = (addr >> 8) & 0xff;
421 desc[4] = (addr >> 0) & 0xff;
422
423 BUG_ON(len > 65536);
424
425 desc[3] = (len >> 8) & 0xff;
426 desc[2] = (len >> 0) & 0xff;
427
428 desc[1] = 0x00;
429 desc[0] = 0x21; /* tran, valid */
430
431 desc += 8;
432
433 /*
434 * If this triggers then we have a calculation bug
435 * somewhere. :/
436 */
437 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
438 }
439
440 /*
441 * Add a terminating entry.
442 */
443 desc[7] = 0;
444 desc[6] = 0;
445 desc[5] = 0;
446 desc[4] = 0;
447
448 desc[3] = 0;
449 desc[2] = 0;
450
451 desc[1] = 0x00;
452 desc[0] = 0x03; /* nop, end, valid */
453
454 /*
455 * Resync align buffer as we might have changed it.
456 */
457 if (data->flags & MMC_DATA_WRITE) {
458 dma_sync_single_for_device(mmc_dev(host->mmc),
459 host->align_addr, 128 * 4, direction);
460 }
461
462 host->adma_addr = dma_map_single(mmc_dev(host->mmc),
463 host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
464 if (dma_mapping_error(host->align_addr))
465 goto unmap_entries;
466 BUG_ON(host->adma_addr & 0x3);
467
468 return 0;
469
470unmap_entries:
471 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
472 data->sg_len, direction);
473unmap_align:
474 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
475 128 * 4, direction);
476fail:
477 return -EINVAL;
478}
479
480static void sdhci_adma_table_post(struct sdhci_host *host,
481 struct mmc_data *data)
482{
483 int direction;
484
485 struct scatterlist *sg;
486 int i, size;
487 u8 *align;
488 char *buffer;
489 unsigned long flags;
490
491 if (data->flags & MMC_DATA_READ)
492 direction = DMA_FROM_DEVICE;
493 else
494 direction = DMA_TO_DEVICE;
495
496 dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
497 (128 * 2 + 1) * 4, DMA_TO_DEVICE);
498
499 dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
500 128 * 4, direction);
501
502 if (data->flags & MMC_DATA_READ) {
503 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
504 data->sg_len, direction);
505
506 align = host->align_buffer;
507
508 for_each_sg(data->sg, sg, host->sg_count, i) {
509 if (sg_dma_address(sg) & 0x3) {
510 size = 4 - (sg_dma_address(sg) & 0x3);
511
512 buffer = sdhci_kmap_atomic(sg, &flags);
513 memcpy(buffer, align, size);
514 sdhci_kunmap_atomic(buffer, &flags);
515
516 align += 4;
517 }
518 }
519 }
520
521 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
522 data->sg_len, direction);
523}
524
525static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
526{
527 u8 count;
528 unsigned target_timeout, current_timeout;
529
530 /*
531 * If the host controller provides us with an incorrect timeout
532 * value, just skip the check and use 0xE. The hardware may take
533 * longer to time out, but that's much better than having a too-short
534 * timeout value.
535 */
536 if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL))
537 return 0xE;
463 538
464 /* timeout in us */ 539 /* timeout in us */
465 target_timeout = data->timeout_ns / 1000 + 540 target_timeout = data->timeout_ns / 1000 +
@@ -484,52 +559,158 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
484 break; 559 break;
485 } 560 }
486 561
487 /*
488 * Compensate for an off-by-one error in the CaFe hardware; otherwise,
489 * a too-small count gives us interrupt timeouts.
490 */
491 if ((host->chip->quirks & SDHCI_QUIRK_INCR_TIMEOUT_CONTROL))
492 count++;
493
494 if (count >= 0xF) { 562 if (count >= 0xF) {
495 printk(KERN_WARNING "%s: Too large timeout requested!\n", 563 printk(KERN_WARNING "%s: Too large timeout requested!\n",
496 mmc_hostname(host->mmc)); 564 mmc_hostname(host->mmc));
497 count = 0xE; 565 count = 0xE;
498 } 566 }
499 567
568 return count;
569}
570
571static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
572{
573 u8 count;
574 u8 ctrl;
575 int ret;
576
577 WARN_ON(host->data);
578
579 if (data == NULL)
580 return;
581
582 /* Sanity checks */
583 BUG_ON(data->blksz * data->blocks > 524288);
584 BUG_ON(data->blksz > host->mmc->max_blk_size);
585 BUG_ON(data->blocks > 65535);
586
587 host->data = data;
588 host->data_early = 0;
589
590 count = sdhci_calc_timeout(host, data);
500 writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL); 591 writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);
501 592
502 if (host->flags & SDHCI_USE_DMA) 593 if (host->flags & SDHCI_USE_DMA)
503 host->flags |= SDHCI_REQ_USE_DMA; 594 host->flags |= SDHCI_REQ_USE_DMA;
504 595
505 if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && 596 /*
506 (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) && 597 * FIXME: This doesn't account for merging when mapping the
507 ((data->blksz * data->blocks) & 0x3))) { 598 * scatterlist.
508 DBG("Reverting to PIO because of transfer size (%d)\n", 599 */
509 data->blksz * data->blocks); 600 if (host->flags & SDHCI_REQ_USE_DMA) {
510 host->flags &= ~SDHCI_REQ_USE_DMA; 601 int broken, i;
602 struct scatterlist *sg;
603
604 broken = 0;
605 if (host->flags & SDHCI_USE_ADMA) {
606 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
607 broken = 1;
608 } else {
609 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
610 broken = 1;
611 }
612
613 if (unlikely(broken)) {
614 for_each_sg(data->sg, sg, data->sg_len, i) {
615 if (sg->length & 0x3) {
616 DBG("Reverting to PIO because of "
617 "transfer size (%d)\n",
618 sg->length);
619 host->flags &= ~SDHCI_REQ_USE_DMA;
620 break;
621 }
622 }
623 }
511 } 624 }
512 625
513 /* 626 /*
514 * The assumption here being that alignment is the same after 627 * The assumption here being that alignment is the same after
515 * translation to device address space. 628 * translation to device address space.
516 */ 629 */
517 if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && 630 if (host->flags & SDHCI_REQ_USE_DMA) {
518 (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && 631 int broken, i;
519 (data->sg->offset & 0x3))) { 632 struct scatterlist *sg;
520 DBG("Reverting to PIO because of bad alignment\n"); 633
521 host->flags &= ~SDHCI_REQ_USE_DMA; 634 broken = 0;
635 if (host->flags & SDHCI_USE_ADMA) {
636 /*
637 * As we use 3 byte chunks to work around
638 * alignment problems, we need to check this
639 * quirk.
640 */
641 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
642 broken = 1;
643 } else {
644 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
645 broken = 1;
646 }
647
648 if (unlikely(broken)) {
649 for_each_sg(data->sg, sg, data->sg_len, i) {
650 if (sg->offset & 0x3) {
651 DBG("Reverting to PIO because of "
652 "bad alignment\n");
653 host->flags &= ~SDHCI_REQ_USE_DMA;
654 break;
655 }
656 }
657 }
522 } 658 }
523 659
524 if (host->flags & SDHCI_REQ_USE_DMA) { 660 if (host->flags & SDHCI_REQ_USE_DMA) {
525 int count; 661 if (host->flags & SDHCI_USE_ADMA) {
662 ret = sdhci_adma_table_pre(host, data);
663 if (ret) {
664 /*
665 * This only happens when someone fed
666 * us an invalid request.
667 */
668 WARN_ON(1);
669 host->flags &= ~SDHCI_USE_DMA;
670 } else {
671 writel(host->adma_addr,
672 host->ioaddr + SDHCI_ADMA_ADDRESS);
673 }
674 } else {
675 int sg_cnt;
676
677 sg_cnt = dma_map_sg(mmc_dev(host->mmc),
678 data->sg, data->sg_len,
679 (data->flags & MMC_DATA_READ) ?
680 DMA_FROM_DEVICE :
681 DMA_TO_DEVICE);
682 if (sg_cnt == 0) {
683 /*
684 * This only happens when someone fed
685 * us an invalid request.
686 */
687 WARN_ON(1);
688 host->flags &= ~SDHCI_USE_DMA;
689 } else {
690 WARN_ON(count != 1);
691 writel(sg_dma_address(data->sg),
692 host->ioaddr + SDHCI_DMA_ADDRESS);
693 }
694 }
695 }
526 696
527 count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len, 697 /*
528 (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); 698 * Always adjust the DMA selection as some controllers
529 BUG_ON(count != 1); 699 * (e.g. JMicron) can't do PIO properly when the selection
700 * is ADMA.
701 */
702 if (host->version >= SDHCI_SPEC_200) {
703 ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
704 ctrl &= ~SDHCI_CTRL_DMA_MASK;
705 if ((host->flags & SDHCI_REQ_USE_DMA) &&
706 (host->flags & SDHCI_USE_ADMA))
707 ctrl |= SDHCI_CTRL_ADMA32;
708 else
709 ctrl |= SDHCI_CTRL_SDMA;
710 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
711 }
530 712
531 writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS); 713 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
532 } else {
533 host->cur_sg = data->sg; 714 host->cur_sg = data->sg;
534 host->num_sg = data->sg_len; 715 host->num_sg = data->sg_len;
535 716
@@ -567,7 +748,6 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
567static void sdhci_finish_data(struct sdhci_host *host) 748static void sdhci_finish_data(struct sdhci_host *host)
568{ 749{
569 struct mmc_data *data; 750 struct mmc_data *data;
570 u16 blocks;
571 751
572 BUG_ON(!host->data); 752 BUG_ON(!host->data);
573 753
@@ -575,25 +755,26 @@ static void sdhci_finish_data(struct sdhci_host *host)
575 host->data = NULL; 755 host->data = NULL;
576 756
577 if (host->flags & SDHCI_REQ_USE_DMA) { 757 if (host->flags & SDHCI_REQ_USE_DMA) {
578 pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len, 758 if (host->flags & SDHCI_USE_ADMA)
579 (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); 759 sdhci_adma_table_post(host, data);
760 else {
761 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
762 data->sg_len, (data->flags & MMC_DATA_READ) ?
763 DMA_FROM_DEVICE : DMA_TO_DEVICE);
764 }
580 } 765 }
581 766
582 /* 767 /*
583 * Controller doesn't count down when in single block mode. 768 * The specification states that the block count register must
769 * be updated, but it does not specify at what point in the
770 * data flow. That makes the register entirely useless to read
771 * back so we have to assume that nothing made it to the card
772 * in the event of an error.
584 */ 773 */
585 if (data->blocks == 1) 774 if (data->error)
586 blocks = (data->error == 0) ? 0 : 1; 775 data->bytes_xfered = 0;
587 else 776 else
588 blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT); 777 data->bytes_xfered = data->blksz * data->blocks;
589 data->bytes_xfered = data->blksz * (data->blocks - blocks);
590
591 if (!data->error && blocks) {
592 printk(KERN_ERR "%s: Controller signalled completion even "
593 "though there were blocks left.\n",
594 mmc_hostname(host->mmc));
595 data->error = -EIO;
596 }
597 778
598 if (data->stop) { 779 if (data->stop) {
599 /* 780 /*
@@ -775,7 +956,7 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
775 * Spec says that we should clear the power reg before setting 956 * Spec says that we should clear the power reg before setting
776 * a new value. Some controllers don't seem to like this though. 957 * a new value. Some controllers don't seem to like this though.
777 */ 958 */
778 if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 959 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
779 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); 960 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
780 961
781 pwr = SDHCI_POWER_ON; 962 pwr = SDHCI_POWER_ON;
@@ -797,10 +978,10 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
797 } 978 }
798 979
799 /* 980 /*
800 * At least the CaFe chip gets confused if we set the voltage 981 * At least the Marvell CaFe chip gets confused if we set the voltage
801 * and set turn on power at the same time, so set the voltage first. 982 * and set turn on power at the same time, so set the voltage first.
802 */ 983 */
803 if ((host->chip->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)) 984 if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER))
804 writeb(pwr & ~SDHCI_POWER_ON, 985 writeb(pwr & ~SDHCI_POWER_ON,
805 host->ioaddr + SDHCI_POWER_CONTROL); 986 host->ioaddr + SDHCI_POWER_CONTROL);
806 987
@@ -833,7 +1014,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
833 1014
834 host->mrq = mrq; 1015 host->mrq = mrq;
835 1016
836 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { 1017 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)
1018 || (host->flags & SDHCI_DEVICE_DEAD)) {
837 host->mrq->cmd->error = -ENOMEDIUM; 1019 host->mrq->cmd->error = -ENOMEDIUM;
838 tasklet_schedule(&host->finish_tasklet); 1020 tasklet_schedule(&host->finish_tasklet);
839 } else 1021 } else
@@ -853,6 +1035,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
853 1035
854 spin_lock_irqsave(&host->lock, flags); 1036 spin_lock_irqsave(&host->lock, flags);
855 1037
1038 if (host->flags & SDHCI_DEVICE_DEAD)
1039 goto out;
1040
856 /* 1041 /*
857 * Reset the chip on each power off. 1042 * Reset the chip on each power off.
858 * Should clear out any weird states. 1043 * Should clear out any weird states.
@@ -888,9 +1073,10 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
888 * signalling timeout and CRC errors even on CMD0. Resetting 1073 * signalling timeout and CRC errors even on CMD0. Resetting
889 * it on each ios seems to solve the problem. 1074 * it on each ios seems to solve the problem.
890 */ 1075 */
891 if(host->chip->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) 1076 if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
892 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 1077 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
893 1078
1079out:
894 mmiowb(); 1080 mmiowb();
895 spin_unlock_irqrestore(&host->lock, flags); 1081 spin_unlock_irqrestore(&host->lock, flags);
896} 1082}
@@ -905,7 +1091,10 @@ static int sdhci_get_ro(struct mmc_host *mmc)
905 1091
906 spin_lock_irqsave(&host->lock, flags); 1092 spin_lock_irqsave(&host->lock, flags);
907 1093
908 present = readl(host->ioaddr + SDHCI_PRESENT_STATE); 1094 if (host->flags & SDHCI_DEVICE_DEAD)
1095 present = 0;
1096 else
1097 present = readl(host->ioaddr + SDHCI_PRESENT_STATE);
909 1098
910 spin_unlock_irqrestore(&host->lock, flags); 1099 spin_unlock_irqrestore(&host->lock, flags);
911 1100
@@ -922,6 +1111,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
922 1111
923 spin_lock_irqsave(&host->lock, flags); 1112 spin_lock_irqsave(&host->lock, flags);
924 1113
1114 if (host->flags & SDHCI_DEVICE_DEAD)
1115 goto out;
1116
925 ier = readl(host->ioaddr + SDHCI_INT_ENABLE); 1117 ier = readl(host->ioaddr + SDHCI_INT_ENABLE);
926 1118
927 ier &= ~SDHCI_INT_CARD_INT; 1119 ier &= ~SDHCI_INT_CARD_INT;
@@ -931,6 +1123,7 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
931 writel(ier, host->ioaddr + SDHCI_INT_ENABLE); 1123 writel(ier, host->ioaddr + SDHCI_INT_ENABLE);
932 writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE); 1124 writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
933 1125
1126out:
934 mmiowb(); 1127 mmiowb();
935 1128
936 spin_unlock_irqrestore(&host->lock, flags); 1129 spin_unlock_irqrestore(&host->lock, flags);
@@ -996,13 +1189,14 @@ static void sdhci_tasklet_finish(unsigned long param)
996 * The controller needs a reset of internal state machines 1189 * The controller needs a reset of internal state machines
997 * upon error conditions. 1190 * upon error conditions.
998 */ 1191 */
999 if (mrq->cmd->error || 1192 if (!(host->flags & SDHCI_DEVICE_DEAD) &&
1000 (mrq->data && (mrq->data->error || 1193 (mrq->cmd->error ||
1001 (mrq->data->stop && mrq->data->stop->error))) || 1194 (mrq->data && (mrq->data->error ||
1002 (host->chip->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) { 1195 (mrq->data->stop && mrq->data->stop->error))) ||
1196 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
1003 1197
1004 /* Some controllers need this kick or reset won't work here */ 1198 /* Some controllers need this kick or reset won't work here */
1005 if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { 1199 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
1006 unsigned int clock; 1200 unsigned int clock;
1007 1201
1008 /* This is to force an update */ 1202 /* This is to force an update */
@@ -1116,6 +1310,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1116 host->data->error = -ETIMEDOUT; 1310 host->data->error = -ETIMEDOUT;
1117 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) 1311 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
1118 host->data->error = -EILSEQ; 1312 host->data->error = -EILSEQ;
1313 else if (intmask & SDHCI_INT_ADMA_ERROR)
1314 host->data->error = -EIO;
1119 1315
1120 if (host->data->error) 1316 if (host->data->error)
1121 sdhci_finish_data(host); 1317 sdhci_finish_data(host);
@@ -1234,218 +1430,167 @@ out:
1234 1430
1235#ifdef CONFIG_PM 1431#ifdef CONFIG_PM
1236 1432
1237static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state) 1433int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
1238{ 1434{
1239 struct sdhci_chip *chip; 1435 int ret;
1240 int i, ret;
1241
1242 chip = pci_get_drvdata(pdev);
1243 if (!chip)
1244 return 0;
1245
1246 DBG("Suspending...\n");
1247
1248 for (i = 0;i < chip->num_slots;i++) {
1249 if (!chip->hosts[i])
1250 continue;
1251 ret = mmc_suspend_host(chip->hosts[i]->mmc, state);
1252 if (ret) {
1253 for (i--;i >= 0;i--)
1254 mmc_resume_host(chip->hosts[i]->mmc);
1255 return ret;
1256 }
1257 }
1258
1259 pci_save_state(pdev);
1260 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
1261 1436
1262 for (i = 0;i < chip->num_slots;i++) { 1437 ret = mmc_suspend_host(host->mmc, state);
1263 if (!chip->hosts[i]) 1438 if (ret)
1264 continue; 1439 return ret;
1265 free_irq(chip->hosts[i]->irq, chip->hosts[i]);
1266 }
1267 1440
1268 pci_disable_device(pdev); 1441 free_irq(host->irq, host);
1269 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1270 1442
1271 return 0; 1443 return 0;
1272} 1444}
1273 1445
1274static int sdhci_resume (struct pci_dev *pdev) 1446EXPORT_SYMBOL_GPL(sdhci_suspend_host);
1275{
1276 struct sdhci_chip *chip;
1277 int i, ret;
1278 1447
1279 chip = pci_get_drvdata(pdev); 1448int sdhci_resume_host(struct sdhci_host *host)
1280 if (!chip) 1449{
1281 return 0; 1450 int ret;
1282 1451
1283 DBG("Resuming...\n"); 1452 if (host->flags & SDHCI_USE_DMA) {
1453 if (host->ops->enable_dma)
1454 host->ops->enable_dma(host);
1455 }
1284 1456
1285 pci_set_power_state(pdev, PCI_D0); 1457 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1286 pci_restore_state(pdev); 1458 mmc_hostname(host->mmc), host);
1287 ret = pci_enable_device(pdev);
1288 if (ret) 1459 if (ret)
1289 return ret; 1460 return ret;
1290 1461
1291 for (i = 0;i < chip->num_slots;i++) { 1462 sdhci_init(host);
1292 if (!chip->hosts[i]) 1463 mmiowb();
1293 continue; 1464
1294 if (chip->hosts[i]->flags & SDHCI_USE_DMA) 1465 ret = mmc_resume_host(host->mmc);
1295 pci_set_master(pdev); 1466 if (ret)
1296 ret = request_irq(chip->hosts[i]->irq, sdhci_irq, 1467 return ret;
1297 IRQF_SHARED, mmc_hostname(chip->hosts[i]->mmc),
1298 chip->hosts[i]);
1299 if (ret)
1300 return ret;
1301 sdhci_init(chip->hosts[i]);
1302 mmiowb();
1303 ret = mmc_resume_host(chip->hosts[i]->mmc);
1304 if (ret)
1305 return ret;
1306 }
1307 1468
1308 return 0; 1469 return 0;
1309} 1470}
1310 1471
1311#else /* CONFIG_PM */ 1472EXPORT_SYMBOL_GPL(sdhci_resume_host);
1312
1313#define sdhci_suspend NULL
1314#define sdhci_resume NULL
1315 1473
1316#endif /* CONFIG_PM */ 1474#endif /* CONFIG_PM */
1317 1475
1318/*****************************************************************************\ 1476/*****************************************************************************\
1319 * * 1477 * *
1320 * Device probing/removal * 1478 * Device allocation/registration *
1321 * * 1479 * *
1322\*****************************************************************************/ 1480\*****************************************************************************/
1323 1481
1324static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) 1482struct sdhci_host *sdhci_alloc_host(struct device *dev,
1483 size_t priv_size)
1325{ 1484{
1326 int ret;
1327 unsigned int version;
1328 struct sdhci_chip *chip;
1329 struct mmc_host *mmc; 1485 struct mmc_host *mmc;
1330 struct sdhci_host *host; 1486 struct sdhci_host *host;
1331 1487
1332 u8 first_bar; 1488 WARN_ON(dev == NULL);
1333 unsigned int caps;
1334
1335 chip = pci_get_drvdata(pdev);
1336 BUG_ON(!chip);
1337
1338 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
1339 if (ret)
1340 return ret;
1341
1342 first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
1343
1344 if (first_bar > 5) {
1345 printk(KERN_ERR DRIVER_NAME ": Invalid first BAR. Aborting.\n");
1346 return -ENODEV;
1347 }
1348
1349 if (!(pci_resource_flags(pdev, first_bar + slot) & IORESOURCE_MEM)) {
1350 printk(KERN_ERR DRIVER_NAME ": BAR is not iomem. Aborting.\n");
1351 return -ENODEV;
1352 }
1353
1354 if (pci_resource_len(pdev, first_bar + slot) != 0x100) {
1355 printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. "
1356 "You may experience problems.\n");
1357 }
1358
1359 if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
1360 printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. Aborting.\n");
1361 return -ENODEV;
1362 }
1363
1364 if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
1365 printk(KERN_ERR DRIVER_NAME ": Unknown interface. Aborting.\n");
1366 return -ENODEV;
1367 }
1368 1489
1369 mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev); 1490 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
1370 if (!mmc) 1491 if (!mmc)
1371 return -ENOMEM; 1492 return ERR_PTR(-ENOMEM);
1372 1493
1373 host = mmc_priv(mmc); 1494 host = mmc_priv(mmc);
1374 host->mmc = mmc; 1495 host->mmc = mmc;
1375 1496
1376 host->chip = chip; 1497 return host;
1377 chip->hosts[slot] = host; 1498}
1378 1499
1379 host->bar = first_bar + slot; 1500EXPORT_SYMBOL_GPL(sdhci_alloc_host);
1380 1501
1381 host->addr = pci_resource_start(pdev, host->bar); 1502int sdhci_add_host(struct sdhci_host *host)
1382 host->irq = pdev->irq; 1503{
1504 struct mmc_host *mmc;
1505 unsigned int caps;
1506 int ret;
1383 1507
1384 DBG("slot %d at 0x%08lx, irq %d\n", slot, host->addr, host->irq); 1508 WARN_ON(host == NULL);
1509 if (host == NULL)
1510 return -EINVAL;
1385 1511
1386 ret = pci_request_region(pdev, host->bar, mmc_hostname(mmc)); 1512 mmc = host->mmc;
1387 if (ret)
1388 goto free;
1389 1513
1390 host->ioaddr = ioremap_nocache(host->addr, 1514 if (debug_quirks)
1391 pci_resource_len(pdev, host->bar)); 1515 host->quirks = debug_quirks;
1392 if (!host->ioaddr) {
1393 ret = -ENOMEM;
1394 goto release;
1395 }
1396 1516
1397 sdhci_reset(host, SDHCI_RESET_ALL); 1517 sdhci_reset(host, SDHCI_RESET_ALL);
1398 1518
1399 version = readw(host->ioaddr + SDHCI_HOST_VERSION); 1519 host->version = readw(host->ioaddr + SDHCI_HOST_VERSION);
1400 version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; 1520 host->version = (host->version & SDHCI_SPEC_VER_MASK)
1401 if (version > 1) { 1521 >> SDHCI_SPEC_VER_SHIFT;
1522 if (host->version > SDHCI_SPEC_200) {
1402 printk(KERN_ERR "%s: Unknown controller version (%d). " 1523 printk(KERN_ERR "%s: Unknown controller version (%d). "
1403 "You may experience problems.\n", mmc_hostname(mmc), 1524 "You may experience problems.\n", mmc_hostname(mmc),
1404 version); 1525 host->version);
1405 } 1526 }
1406 1527
1407 caps = readl(host->ioaddr + SDHCI_CAPABILITIES); 1528 caps = readl(host->ioaddr + SDHCI_CAPABILITIES);
1408 1529
1409 if (chip->quirks & SDHCI_QUIRK_FORCE_DMA) 1530 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
1410 host->flags |= SDHCI_USE_DMA; 1531 host->flags |= SDHCI_USE_DMA;
1411 else if (!(caps & SDHCI_CAN_DO_DMA)) 1532 else if (!(caps & SDHCI_CAN_DO_DMA))
1412 DBG("Controller doesn't have DMA capability\n"); 1533 DBG("Controller doesn't have DMA capability\n");
1413 else 1534 else
1414 host->flags |= SDHCI_USE_DMA; 1535 host->flags |= SDHCI_USE_DMA;
1415 1536
1416 if ((chip->quirks & SDHCI_QUIRK_BROKEN_DMA) && 1537 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
1417 (host->flags & SDHCI_USE_DMA)) { 1538 (host->flags & SDHCI_USE_DMA)) {
1418 DBG("Disabling DMA as it is marked broken\n"); 1539 DBG("Disabling DMA as it is marked broken\n");
1419 host->flags &= ~SDHCI_USE_DMA; 1540 host->flags &= ~SDHCI_USE_DMA;
1420 } 1541 }
1421 1542
1422 if (((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && 1543 if (host->flags & SDHCI_USE_DMA) {
1423 (host->flags & SDHCI_USE_DMA)) { 1544 if ((host->version >= SDHCI_SPEC_200) &&
1424 printk(KERN_WARNING "%s: Will use DMA " 1545 (caps & SDHCI_CAN_DO_ADMA2))
1425 "mode even though HW doesn't fully " 1546 host->flags |= SDHCI_USE_ADMA;
1426 "claim to support it.\n", mmc_hostname(mmc)); 1547 }
1548
1549 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
1550 (host->flags & SDHCI_USE_ADMA)) {
1551 DBG("Disabling ADMA as it is marked broken\n");
1552 host->flags &= ~SDHCI_USE_ADMA;
1427 } 1553 }
1428 1554
1429 if (host->flags & SDHCI_USE_DMA) { 1555 if (host->flags & SDHCI_USE_DMA) {
1430 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { 1556 if (host->ops->enable_dma) {
1431 printk(KERN_WARNING "%s: No suitable DMA available. " 1557 if (host->ops->enable_dma(host)) {
1432 "Falling back to PIO.\n", mmc_hostname(mmc)); 1558 printk(KERN_WARNING "%s: No suitable DMA "
1433 host->flags &= ~SDHCI_USE_DMA; 1559 "available. Falling back to PIO.\n",
1560 mmc_hostname(mmc));
1561 host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA);
1562 }
1434 } 1563 }
1435 } 1564 }
1436 1565
1437 if (host->flags & SDHCI_USE_DMA) 1566 if (host->flags & SDHCI_USE_ADMA) {
1438 pci_set_master(pdev); 1567 /*
1439 else /* XXX: Hack to get MMC layer to avoid highmem */ 1568 * We need to allocate descriptors for all sg entries
1440 pdev->dma_mask = 0; 1569 * (128) and potentially one alignment transfer for
1570 * each of those entries.
1571 */
1572 host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
1573 host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
1574 if (!host->adma_desc || !host->align_buffer) {
1575 kfree(host->adma_desc);
1576 kfree(host->align_buffer);
1577 printk(KERN_WARNING "%s: Unable to allocate ADMA "
1578 "buffers. Falling back to standard DMA.\n",
1579 mmc_hostname(mmc));
1580 host->flags &= ~SDHCI_USE_ADMA;
1581 }
1582 }
1583
1584 /* XXX: Hack to get MMC layer to avoid highmem */
1585 if (!(host->flags & SDHCI_USE_DMA))
1586 mmc_dev(host->mmc)->dma_mask = NULL;
1441 1587
1442 host->max_clk = 1588 host->max_clk =
1443 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; 1589 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
1444 if (host->max_clk == 0) { 1590 if (host->max_clk == 0) {
1445 printk(KERN_ERR "%s: Hardware doesn't specify base clock " 1591 printk(KERN_ERR "%s: Hardware doesn't specify base clock "
1446 "frequency.\n", mmc_hostname(mmc)); 1592 "frequency.\n", mmc_hostname(mmc));
1447 ret = -ENODEV; 1593 return -ENODEV;
1448 goto unmap;
1449 } 1594 }
1450 host->max_clk *= 1000000; 1595 host->max_clk *= 1000000;
1451 1596
@@ -1454,8 +1599,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1454 if (host->timeout_clk == 0) { 1599 if (host->timeout_clk == 0) {
1455 printk(KERN_ERR "%s: Hardware doesn't specify timeout clock " 1600 printk(KERN_ERR "%s: Hardware doesn't specify timeout clock "
1456 "frequency.\n", mmc_hostname(mmc)); 1601 "frequency.\n", mmc_hostname(mmc));
1457 ret = -ENODEV; 1602 return -ENODEV;
1458 goto unmap;
1459 } 1603 }
1460 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 1604 if (caps & SDHCI_TIMEOUT_CLK_UNIT)
1461 host->timeout_clk *= 1000; 1605 host->timeout_clk *= 1000;
@@ -1466,7 +1610,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1466 mmc->ops = &sdhci_ops; 1610 mmc->ops = &sdhci_ops;
1467 mmc->f_min = host->max_clk / 256; 1611 mmc->f_min = host->max_clk / 256;
1468 mmc->f_max = host->max_clk; 1612 mmc->f_max = host->max_clk;
1469 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_SDIO_IRQ; 1613 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1470 1614
1471 if (caps & SDHCI_CAN_DO_HISPD) 1615 if (caps & SDHCI_CAN_DO_HISPD)
1472 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 1616 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
@@ -1482,20 +1626,22 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1482 if (mmc->ocr_avail == 0) { 1626 if (mmc->ocr_avail == 0) {
1483 printk(KERN_ERR "%s: Hardware doesn't report any " 1627 printk(KERN_ERR "%s: Hardware doesn't report any "
1484 "support voltages.\n", mmc_hostname(mmc)); 1628 "support voltages.\n", mmc_hostname(mmc));
1485 ret = -ENODEV; 1629 return -ENODEV;
1486 goto unmap;
1487 } 1630 }
1488 1631
1489 spin_lock_init(&host->lock); 1632 spin_lock_init(&host->lock);
1490 1633
1491 /* 1634 /*
1492 * Maximum number of segments. Hardware cannot do scatter lists. 1635 * Maximum number of segments. Depends on if the hardware
1636 * can do scatter/gather or not.
1493 */ 1637 */
1494 if (host->flags & SDHCI_USE_DMA) 1638 if (host->flags & SDHCI_USE_ADMA)
1639 mmc->max_hw_segs = 128;
1640 else if (host->flags & SDHCI_USE_DMA)
1495 mmc->max_hw_segs = 1; 1641 mmc->max_hw_segs = 1;
1496 else 1642 else /* PIO */
1497 mmc->max_hw_segs = 16; 1643 mmc->max_hw_segs = 128;
1498 mmc->max_phys_segs = 16; 1644 mmc->max_phys_segs = 128;
1499 1645
1500 /* 1646 /*
1501 * Maximum number of sectors in one transfer. Limited by DMA boundary 1647 * Maximum number of sectors in one transfer. Limited by DMA boundary
@@ -1505,9 +1651,13 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1505 1651
1506 /* 1652 /*
1507 * Maximum segment size. Could be one segment with the maximum number 1653 * Maximum segment size. Could be one segment with the maximum number
1508 * of bytes. 1654 * of bytes. When doing hardware scatter/gather, each entry cannot
1655 * be larger than 64 KiB though.
1509 */ 1656 */
1510 mmc->max_seg_size = mmc->max_req_size; 1657 if (host->flags & SDHCI_USE_ADMA)
1658 mmc->max_seg_size = 65536;
1659 else
1660 mmc->max_seg_size = mmc->max_req_size;
1511 1661
1512 /* 1662 /*
1513 * Maximum block size. This varies from controller to controller and 1663 * Maximum block size. This varies from controller to controller and
@@ -1553,7 +1703,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1553 host->led.default_trigger = mmc_hostname(mmc); 1703 host->led.default_trigger = mmc_hostname(mmc);
1554 host->led.brightness_set = sdhci_led_control; 1704 host->led.brightness_set = sdhci_led_control;
1555 1705
1556 ret = led_classdev_register(&pdev->dev, &host->led); 1706 ret = led_classdev_register(mmc_dev(mmc), &host->led);
1557 if (ret) 1707 if (ret)
1558 goto reset; 1708 goto reset;
1559#endif 1709#endif
@@ -1562,8 +1712,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1562 1712
1563 mmc_add_host(mmc); 1713 mmc_add_host(mmc);
1564 1714
1565 printk(KERN_INFO "%s: SDHCI at 0x%08lx irq %d %s\n", 1715 printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
1566 mmc_hostname(mmc), host->addr, host->irq, 1716 mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->bus_id,
1717 (host->flags & SDHCI_USE_ADMA)?"A":"",
1567 (host->flags & SDHCI_USE_DMA)?"DMA":"PIO"); 1718 (host->flags & SDHCI_USE_DMA)?"DMA":"PIO");
1568 1719
1569 return 0; 1720 return 0;
@@ -1576,35 +1727,40 @@ reset:
1576untasklet: 1727untasklet:
1577 tasklet_kill(&host->card_tasklet); 1728 tasklet_kill(&host->card_tasklet);
1578 tasklet_kill(&host->finish_tasklet); 1729 tasklet_kill(&host->finish_tasklet);
1579unmap:
1580 iounmap(host->ioaddr);
1581release:
1582 pci_release_region(pdev, host->bar);
1583free:
1584 mmc_free_host(mmc);
1585 1730
1586 return ret; 1731 return ret;
1587} 1732}
1588 1733
1589static void sdhci_remove_slot(struct pci_dev *pdev, int slot) 1734EXPORT_SYMBOL_GPL(sdhci_add_host);
1735
1736void sdhci_remove_host(struct sdhci_host *host, int dead)
1590{ 1737{
1591 struct sdhci_chip *chip; 1738 unsigned long flags;
1592 struct mmc_host *mmc;
1593 struct sdhci_host *host;
1594 1739
1595 chip = pci_get_drvdata(pdev); 1740 if (dead) {
1596 host = chip->hosts[slot]; 1741 spin_lock_irqsave(&host->lock, flags);
1597 mmc = host->mmc; 1742
1743 host->flags |= SDHCI_DEVICE_DEAD;
1744
1745 if (host->mrq) {
1746 printk(KERN_ERR "%s: Controller removed during "
1747 " transfer!\n", mmc_hostname(host->mmc));
1598 1748
1599 chip->hosts[slot] = NULL; 1749 host->mrq->cmd->error = -ENOMEDIUM;
1750 tasklet_schedule(&host->finish_tasklet);
1751 }
1752
1753 spin_unlock_irqrestore(&host->lock, flags);
1754 }
1600 1755
1601 mmc_remove_host(mmc); 1756 mmc_remove_host(host->mmc);
1602 1757
1603#ifdef CONFIG_LEDS_CLASS 1758#ifdef CONFIG_LEDS_CLASS
1604 led_classdev_unregister(&host->led); 1759 led_classdev_unregister(&host->led);
1605#endif 1760#endif
1606 1761
1607 sdhci_reset(host, SDHCI_RESET_ALL); 1762 if (!dead)
1763 sdhci_reset(host, SDHCI_RESET_ALL);
1608 1764
1609 free_irq(host->irq, host); 1765 free_irq(host->irq, host);
1610 1766
@@ -1613,106 +1769,21 @@ static void sdhci_remove_slot(struct pci_dev *pdev, int slot)
1613 tasklet_kill(&host->card_tasklet); 1769 tasklet_kill(&host->card_tasklet);
1614 tasklet_kill(&host->finish_tasklet); 1770 tasklet_kill(&host->finish_tasklet);
1615 1771
1616 iounmap(host->ioaddr); 1772 kfree(host->adma_desc);
1617 1773 kfree(host->align_buffer);
1618 pci_release_region(pdev, host->bar);
1619 1774
1620 mmc_free_host(mmc); 1775 host->adma_desc = NULL;
1776 host->align_buffer = NULL;
1621} 1777}
1622 1778
1623static int __devinit sdhci_probe(struct pci_dev *pdev, 1779EXPORT_SYMBOL_GPL(sdhci_remove_host);
1624 const struct pci_device_id *ent)
1625{
1626 int ret, i;
1627 u8 slots, rev;
1628 struct sdhci_chip *chip;
1629
1630 BUG_ON(pdev == NULL);
1631 BUG_ON(ent == NULL);
1632 1780
1633 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); 1781void sdhci_free_host(struct sdhci_host *host)
1634
1635 printk(KERN_INFO DRIVER_NAME
1636 ": SDHCI controller found at %s [%04x:%04x] (rev %x)\n",
1637 pci_name(pdev), (int)pdev->vendor, (int)pdev->device,
1638 (int)rev);
1639
1640 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
1641 if (ret)
1642 return ret;
1643
1644 slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
1645 DBG("found %d slot(s)\n", slots);
1646 if (slots == 0)
1647 return -ENODEV;
1648
1649 ret = pci_enable_device(pdev);
1650 if (ret)
1651 return ret;
1652
1653 chip = kzalloc(sizeof(struct sdhci_chip) +
1654 sizeof(struct sdhci_host*) * slots, GFP_KERNEL);
1655 if (!chip) {
1656 ret = -ENOMEM;
1657 goto err;
1658 }
1659
1660 chip->pdev = pdev;
1661 chip->quirks = ent->driver_data;
1662
1663 if (debug_quirks)
1664 chip->quirks = debug_quirks;
1665
1666 chip->num_slots = slots;
1667 pci_set_drvdata(pdev, chip);
1668
1669 for (i = 0;i < slots;i++) {
1670 ret = sdhci_probe_slot(pdev, i);
1671 if (ret) {
1672 for (i--;i >= 0;i--)
1673 sdhci_remove_slot(pdev, i);
1674 goto free;
1675 }
1676 }
1677
1678 return 0;
1679
1680free:
1681 pci_set_drvdata(pdev, NULL);
1682 kfree(chip);
1683
1684err:
1685 pci_disable_device(pdev);
1686 return ret;
1687}
1688
1689static void __devexit sdhci_remove(struct pci_dev *pdev)
1690{ 1782{
1691 int i; 1783 mmc_free_host(host->mmc);
1692 struct sdhci_chip *chip;
1693
1694 chip = pci_get_drvdata(pdev);
1695
1696 if (chip) {
1697 for (i = 0;i < chip->num_slots;i++)
1698 sdhci_remove_slot(pdev, i);
1699
1700 pci_set_drvdata(pdev, NULL);
1701
1702 kfree(chip);
1703 }
1704
1705 pci_disable_device(pdev);
1706} 1784}
1707 1785
1708static struct pci_driver sdhci_driver = { 1786EXPORT_SYMBOL_GPL(sdhci_free_host);
1709 .name = DRIVER_NAME,
1710 .id_table = pci_ids,
1711 .probe = sdhci_probe,
1712 .remove = __devexit_p(sdhci_remove),
1713 .suspend = sdhci_suspend,
1714 .resume = sdhci_resume,
1715};
1716 1787
1717/*****************************************************************************\ 1788/*****************************************************************************\
1718 * * 1789 * *
@@ -1726,14 +1797,11 @@ static int __init sdhci_drv_init(void)
1726 ": Secure Digital Host Controller Interface driver\n"); 1797 ": Secure Digital Host Controller Interface driver\n");
1727 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 1798 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
1728 1799
1729 return pci_register_driver(&sdhci_driver); 1800 return 0;
1730} 1801}
1731 1802
1732static void __exit sdhci_drv_exit(void) 1803static void __exit sdhci_drv_exit(void)
1733{ 1804{
1734 DBG("Exiting\n");
1735
1736 pci_unregister_driver(&sdhci_driver);
1737} 1805}
1738 1806
1739module_init(sdhci_drv_init); 1807module_init(sdhci_drv_init);
@@ -1742,7 +1810,7 @@ module_exit(sdhci_drv_exit);
1742module_param(debug_quirks, uint, 0444); 1810module_param(debug_quirks, uint, 0444);
1743 1811
1744MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); 1812MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
1745MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver"); 1813MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
1746MODULE_LICENSE("GPL"); 1814MODULE_LICENSE("GPL");
1747 1815
1748MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 1816MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 299118de8933..5bb355281765 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -10,18 +10,6 @@
10 */ 10 */
11 11
12/* 12/*
13 * PCI registers
14 */
15
16#define PCI_SDHCI_IFPIO 0x00
17#define PCI_SDHCI_IFDMA 0x01
18#define PCI_SDHCI_IFVENDOR 0x02
19
20#define PCI_SLOT_INFO 0x40 /* 8 bits */
21#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7)
22#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07
23
24/*
25 * Controller registers 13 * Controller registers
26 */ 14 */
27 15
@@ -72,6 +60,11 @@
72#define SDHCI_CTRL_LED 0x01 60#define SDHCI_CTRL_LED 0x01
73#define SDHCI_CTRL_4BITBUS 0x02 61#define SDHCI_CTRL_4BITBUS 0x02
74#define SDHCI_CTRL_HISPD 0x04 62#define SDHCI_CTRL_HISPD 0x04
63#define SDHCI_CTRL_DMA_MASK 0x18
64#define SDHCI_CTRL_SDMA 0x00
65#define SDHCI_CTRL_ADMA1 0x08
66#define SDHCI_CTRL_ADMA32 0x10
67#define SDHCI_CTRL_ADMA64 0x18
75 68
76#define SDHCI_POWER_CONTROL 0x29 69#define SDHCI_POWER_CONTROL 0x29
77#define SDHCI_POWER_ON 0x01 70#define SDHCI_POWER_ON 0x01
@@ -117,6 +110,7 @@
117#define SDHCI_INT_DATA_END_BIT 0x00400000 110#define SDHCI_INT_DATA_END_BIT 0x00400000
118#define SDHCI_INT_BUS_POWER 0x00800000 111#define SDHCI_INT_BUS_POWER 0x00800000
119#define SDHCI_INT_ACMD12ERR 0x01000000 112#define SDHCI_INT_ACMD12ERR 0x01000000
113#define SDHCI_INT_ADMA_ERROR 0x02000000
120 114
121#define SDHCI_INT_NORMAL_MASK 0x00007FFF 115#define SDHCI_INT_NORMAL_MASK 0x00007FFF
122#define SDHCI_INT_ERROR_MASK 0xFFFF8000 116#define SDHCI_INT_ERROR_MASK 0xFFFF8000
@@ -140,11 +134,14 @@
140#define SDHCI_CLOCK_BASE_SHIFT 8 134#define SDHCI_CLOCK_BASE_SHIFT 8
141#define SDHCI_MAX_BLOCK_MASK 0x00030000 135#define SDHCI_MAX_BLOCK_MASK 0x00030000
142#define SDHCI_MAX_BLOCK_SHIFT 16 136#define SDHCI_MAX_BLOCK_SHIFT 16
137#define SDHCI_CAN_DO_ADMA2 0x00080000
138#define SDHCI_CAN_DO_ADMA1 0x00100000
143#define SDHCI_CAN_DO_HISPD 0x00200000 139#define SDHCI_CAN_DO_HISPD 0x00200000
144#define SDHCI_CAN_DO_DMA 0x00400000 140#define SDHCI_CAN_DO_DMA 0x00400000
145#define SDHCI_CAN_VDD_330 0x01000000 141#define SDHCI_CAN_VDD_330 0x01000000
146#define SDHCI_CAN_VDD_300 0x02000000 142#define SDHCI_CAN_VDD_300 0x02000000
147#define SDHCI_CAN_VDD_180 0x04000000 143#define SDHCI_CAN_VDD_180 0x04000000
144#define SDHCI_CAN_64BIT 0x10000000
148 145
149/* 44-47 reserved for more caps */ 146/* 44-47 reserved for more caps */
150 147
@@ -152,7 +149,16 @@
152 149
153/* 4C-4F reserved for more max current */ 150/* 4C-4F reserved for more max current */
154 151
155/* 50-FB reserved */ 152#define SDHCI_SET_ACMD12_ERROR 0x50
153#define SDHCI_SET_INT_ERROR 0x52
154
155#define SDHCI_ADMA_ERROR 0x54
156
157/* 55-57 reserved */
158
159#define SDHCI_ADMA_ADDRESS 0x58
160
161/* 60-FB reserved */
156 162
157#define SDHCI_SLOT_INT_STATUS 0xFC 163#define SDHCI_SLOT_INT_STATUS 0xFC
158 164
@@ -161,11 +167,50 @@
161#define SDHCI_VENDOR_VER_SHIFT 8 167#define SDHCI_VENDOR_VER_SHIFT 8
162#define SDHCI_SPEC_VER_MASK 0x00FF 168#define SDHCI_SPEC_VER_MASK 0x00FF
163#define SDHCI_SPEC_VER_SHIFT 0 169#define SDHCI_SPEC_VER_SHIFT 0
170#define SDHCI_SPEC_100 0
171#define SDHCI_SPEC_200 1
164 172
165struct sdhci_chip; 173struct sdhci_ops;
166 174
167struct sdhci_host { 175struct sdhci_host {
168 struct sdhci_chip *chip; 176 /* Data set by hardware interface driver */
177 const char *hw_name; /* Hardware bus name */
178
179 unsigned int quirks; /* Deviations from spec. */
180
181/* Controller doesn't honor resets unless we touch the clock register */
182#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
183/* Controller has bad caps bits, but really supports DMA */
184#define SDHCI_QUIRK_FORCE_DMA (1<<1)
185/* Controller doesn't like to be reset when there is no card inserted. */
186#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
187/* Controller doesn't like clearing the power reg before a change */
188#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
189/* Controller has flaky internal state so reset it on each ios change */
190#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
191/* Controller has an unusable DMA engine */
192#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
193/* Controller has an unusable ADMA engine */
194#define SDHCI_QUIRK_BROKEN_ADMA (1<<6)
195/* Controller can only DMA from 32-bit aligned addresses */
196#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7)
197/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
198#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8)
199/* Controller can only ADMA chunks that are a multiple of 32 bits */
200#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9)
201/* Controller needs to be reset after each request to stay stable */
202#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10)
203/* Controller needs voltage and power writes to happen separately */
204#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11)
205/* Controller provides an incorrect timeout value for transfers */
206#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
207
208 int irq; /* Device IRQ */
209 void __iomem * ioaddr; /* Mapped address */
210
211 const struct sdhci_ops *ops; /* Low level hw interface */
212
213 /* Internal data */
169 struct mmc_host *mmc; /* MMC structure */ 214 struct mmc_host *mmc; /* MMC structure */
170 215
171#ifdef CONFIG_LEDS_CLASS 216#ifdef CONFIG_LEDS_CLASS
@@ -176,7 +221,11 @@ struct sdhci_host {
176 221
177 int flags; /* Host attributes */ 222 int flags; /* Host attributes */
178#define SDHCI_USE_DMA (1<<0) /* Host is DMA capable */ 223#define SDHCI_USE_DMA (1<<0) /* Host is DMA capable */
179#define SDHCI_REQ_USE_DMA (1<<1) /* Use DMA for this req. */ 224#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */
225#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
226#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
227
228 unsigned int version; /* SDHCI spec. version */
180 229
181 unsigned int max_clk; /* Max possible freq (MHz) */ 230 unsigned int max_clk; /* Max possible freq (MHz) */
182 unsigned int timeout_clk; /* Timeout freq (KHz) */ 231 unsigned int timeout_clk; /* Timeout freq (KHz) */
@@ -194,22 +243,41 @@ struct sdhci_host {
194 int offset; /* Offset into current sg */ 243 int offset; /* Offset into current sg */
195 int remain; /* Bytes left in current */ 244 int remain; /* Bytes left in current */
196 245
197 int irq; /* Device IRQ */ 246 int sg_count; /* Mapped sg entries */
198 int bar; /* PCI BAR index */ 247
199 unsigned long addr; /* Bus address */ 248 u8 *adma_desc; /* ADMA descriptor table */
200 void __iomem * ioaddr; /* Mapped address */ 249 u8 *align_buffer; /* Bounce buffer */
250
251 dma_addr_t adma_addr; /* Mapped ADMA descr. table */
252 dma_addr_t align_addr; /* Mapped bounce buffer */
201 253
202 struct tasklet_struct card_tasklet; /* Tasklet structures */ 254 struct tasklet_struct card_tasklet; /* Tasklet structures */
203 struct tasklet_struct finish_tasklet; 255 struct tasklet_struct finish_tasklet;
204 256
205 struct timer_list timer; /* Timer for timeouts */ 257 struct timer_list timer; /* Timer for timeouts */
206};
207 258
208struct sdhci_chip { 259 unsigned long private[0] ____cacheline_aligned;
209 struct pci_dev *pdev; 260};
210 261
211 unsigned long quirks;
212 262
213 int num_slots; /* Slots on controller */ 263struct sdhci_ops {
214 struct sdhci_host *hosts[0]; /* Pointers to hosts */ 264 int (*enable_dma)(struct sdhci_host *host);
215}; 265};
266
267
268extern struct sdhci_host *sdhci_alloc_host(struct device *dev,
269 size_t priv_size);
270extern void sdhci_free_host(struct sdhci_host *host);
271
272static inline void *sdhci_priv(struct sdhci_host *host)
273{
274 return (void *)host->private;
275}
276
277extern int sdhci_add_host(struct sdhci_host *host);
278extern void sdhci_remove_host(struct sdhci_host *host, int dead);
279
280#ifdef CONFIG_PM
281extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
282extern int sdhci_resume_host(struct sdhci_host *host);
283#endif
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
new file mode 100644
index 000000000000..f99e9f721629
--- /dev/null
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -0,0 +1,575 @@
1/*
2 * sdricoh_cs.c - driver for Ricoh Secure Digital Card Readers that can be
3 * found on some Ricoh RL5c476 II cardbus bridge
4 *
5 * Copyright (C) 2006 - 2008 Sascha Sommer <saschasommer@freenet.de>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 */
22
23/*
24#define DEBUG
25#define VERBOSE_DEBUG
26*/
27#include <linux/delay.h>
28#include <linux/highmem.h>
29#include <linux/pci.h>
30#include <linux/ioport.h>
31#include <linux/scatterlist.h>
32#include <linux/version.h>
33
34#include <pcmcia/cs_types.h>
35#include <pcmcia/cs.h>
36#include <pcmcia/cistpl.h>
37#include <pcmcia/ds.h>
38#include <linux/io.h>
39
40#include <linux/mmc/host.h>
41
42#define DRIVER_NAME "sdricoh_cs"
43
44static unsigned int switchlocked;
45
46/* i/o region */
47#define SDRICOH_PCI_REGION 0
48#define SDRICOH_PCI_REGION_SIZE 0x1000
49
50/* registers */
51#define R104_VERSION 0x104
52#define R200_CMD 0x200
53#define R204_CMD_ARG 0x204
54#define R208_DATAIO 0x208
55#define R20C_RESP 0x20c
56#define R21C_STATUS 0x21c
57#define R2E0_INIT 0x2e0
58#define R2E4_STATUS_RESP 0x2e4
59#define R2F0_RESET 0x2f0
60#define R224_MODE 0x224
61#define R226_BLOCKSIZE 0x226
62#define R228_POWER 0x228
63#define R230_DATA 0x230
64
65/* flags for the R21C_STATUS register */
66#define STATUS_CMD_FINISHED 0x00000001
67#define STATUS_TRANSFER_FINISHED 0x00000004
68#define STATUS_CARD_INSERTED 0x00000020
69#define STATUS_CARD_LOCKED 0x00000080
70#define STATUS_CMD_TIMEOUT 0x00400000
71#define STATUS_READY_TO_READ 0x01000000
72#define STATUS_READY_TO_WRITE 0x02000000
73#define STATUS_BUSY 0x40000000
74
75/* timeouts */
76#define INIT_TIMEOUT 100
77#define CMD_TIMEOUT 100000
78#define TRANSFER_TIMEOUT 100000
79#define BUSY_TIMEOUT 32767
80
81/* list of supported pcmcia devices */
82static struct pcmcia_device_id pcmcia_ids[] = {
83 /* vendor and device strings followed by their crc32 hashes */
84 PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed,
85 0xc3901202),
86 PCMCIA_DEVICE_NULL,
87};
88
89MODULE_DEVICE_TABLE(pcmcia, pcmcia_ids);
90
91/* mmc privdata */
92struct sdricoh_host {
93 struct device *dev;
94 struct mmc_host *mmc; /* MMC structure */
95 unsigned char __iomem *iobase;
96 struct pci_dev *pci_dev;
97 int app_cmd;
98};
99
100/***************** register i/o helper functions *****************************/
101
102static inline unsigned int sdricoh_readl(struct sdricoh_host *host,
103 unsigned int reg)
104{
105 unsigned int value = readl(host->iobase + reg);
106 dev_vdbg(host->dev, "rl %x 0x%x\n", reg, value);
107 return value;
108}
109
110static inline void sdricoh_writel(struct sdricoh_host *host, unsigned int reg,
111 unsigned int value)
112{
113 writel(value, host->iobase + reg);
114 dev_vdbg(host->dev, "wl %x 0x%x\n", reg, value);
115
116}
117
118static inline unsigned int sdricoh_readw(struct sdricoh_host *host,
119 unsigned int reg)
120{
121 unsigned int value = readw(host->iobase + reg);
122 dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value);
123 return value;
124}
125
126static inline void sdricoh_writew(struct sdricoh_host *host, unsigned int reg,
127 unsigned short value)
128{
129 writew(value, host->iobase + reg);
130 dev_vdbg(host->dev, "ww %x 0x%x\n", reg, value);
131}
132
133static inline unsigned int sdricoh_readb(struct sdricoh_host *host,
134 unsigned int reg)
135{
136 unsigned int value = readb(host->iobase + reg);
137 dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value);
138 return value;
139}
140
141static int sdricoh_query_status(struct sdricoh_host *host, unsigned int wanted,
142 unsigned int timeout){
143 unsigned int loop;
144 unsigned int status = 0;
145 struct device *dev = host->dev;
146 for (loop = 0; loop < timeout; loop++) {
147 status = sdricoh_readl(host, R21C_STATUS);
148 sdricoh_writel(host, R2E4_STATUS_RESP, status);
149 if (status & wanted)
150 break;
151 }
152
153 if (loop == timeout) {
154 dev_err(dev, "query_status: timeout waiting for %x\n", wanted);
155 return -ETIMEDOUT;
156 }
157
158 /* do not do this check in the loop as some commands fail otherwise */
159 if (status & 0x7F0000) {
160 dev_err(dev, "waiting for status bit %x failed\n", wanted);
161 return -EINVAL;
162 }
163 return 0;
164
165}
166
167static int sdricoh_mmc_cmd(struct sdricoh_host *host, unsigned char opcode,
168 unsigned int arg)
169{
170 unsigned int status;
171 int result = 0;
172 unsigned int loop = 0;
173 /* reset status reg? */
174 sdricoh_writel(host, R21C_STATUS, 0x18);
175 /* fill parameters */
176 sdricoh_writel(host, R204_CMD_ARG, arg);
177 sdricoh_writel(host, R200_CMD, (0x10000 << 8) | opcode);
178 /* wait for command completion */
179 if (opcode) {
180 for (loop = 0; loop < CMD_TIMEOUT; loop++) {
181 status = sdricoh_readl(host, R21C_STATUS);
182 sdricoh_writel(host, R2E4_STATUS_RESP, status);
183 if (status & STATUS_CMD_FINISHED)
184 break;
185 }
186 /* don't check for timeout in the loop it is not always
187 reset correctly
188 */
189 if (loop == CMD_TIMEOUT || status & STATUS_CMD_TIMEOUT)
190 result = -ETIMEDOUT;
191
192 }
193
194 return result;
195
196}
197
198static int sdricoh_reset(struct sdricoh_host *host)
199{
200 dev_dbg(host->dev, "reset\n");
201 sdricoh_writel(host, R2F0_RESET, 0x10001);
202 sdricoh_writel(host, R2E0_INIT, 0x10000);
203 if (sdricoh_readl(host, R2E0_INIT) != 0x10000)
204 return -EIO;
205 sdricoh_writel(host, R2E0_INIT, 0x10007);
206
207 sdricoh_writel(host, R224_MODE, 0x2000000);
208 sdricoh_writel(host, R228_POWER, 0xe0);
209
210
211 /* status register ? */
212 sdricoh_writel(host, R21C_STATUS, 0x18);
213
214 return 0;
215}
216
217static int sdricoh_blockio(struct sdricoh_host *host, int read,
218 u8 *buf, int len)
219{
220 int size;
221 u32 data = 0;
222 /* wait until the data is available */
223 if (read) {
224 if (sdricoh_query_status(host, STATUS_READY_TO_READ,
225 TRANSFER_TIMEOUT))
226 return -ETIMEDOUT;
227 sdricoh_writel(host, R21C_STATUS, 0x18);
228 /* read data */
229 while (len) {
230 data = sdricoh_readl(host, R230_DATA);
231 size = min(len, 4);
232 len -= size;
233 while (size) {
234 *buf = data & 0xFF;
235 buf++;
236 data >>= 8;
237 size--;
238 }
239 }
240 } else {
241 if (sdricoh_query_status(host, STATUS_READY_TO_WRITE,
242 TRANSFER_TIMEOUT))
243 return -ETIMEDOUT;
244 sdricoh_writel(host, R21C_STATUS, 0x18);
245 /* write data */
246 while (len) {
247 size = min(len, 4);
248 len -= size;
249 while (size) {
250 data >>= 8;
251 data |= (u32)*buf << 24;
252 buf++;
253 size--;
254 }
255 sdricoh_writel(host, R230_DATA, data);
256 }
257 }
258
259 if (len)
260 return -EIO;
261
262 return 0;
263}
264
265static void sdricoh_request(struct mmc_host *mmc, struct mmc_request *mrq)
266{
267 struct sdricoh_host *host = mmc_priv(mmc);
268 struct mmc_command *cmd = mrq->cmd;
269 struct mmc_data *data = cmd->data;
270 struct device *dev = host->dev;
271 unsigned char opcode = cmd->opcode;
272 int i;
273
274 dev_dbg(dev, "=============================\n");
275 dev_dbg(dev, "sdricoh_request opcode=%i\n", opcode);
276
277 sdricoh_writel(host, R21C_STATUS, 0x18);
278
279 /* MMC_APP_CMDs need some special handling */
280 if (host->app_cmd) {
281 opcode |= 64;
282 host->app_cmd = 0;
283 } else if (opcode == 55)
284 host->app_cmd = 1;
285
286 /* read/write commands seem to require this */
287 if (data) {
288 sdricoh_writew(host, R226_BLOCKSIZE, data->blksz);
289 sdricoh_writel(host, R208_DATAIO, 0);
290 }
291
292 cmd->error = sdricoh_mmc_cmd(host, opcode, cmd->arg);
293
294 /* read response buffer */
295 if (cmd->flags & MMC_RSP_PRESENT) {
296 if (cmd->flags & MMC_RSP_136) {
297 /* CRC is stripped so we need to do some shifting. */
298 for (i = 0; i < 4; i++) {
299 cmd->resp[i] =
300 sdricoh_readl(host,
301 R20C_RESP + (3 - i) * 4) << 8;
302 if (i != 3)
303 cmd->resp[i] |=
304 sdricoh_readb(host, R20C_RESP +
305 (3 - i) * 4 - 1);
306 }
307 } else
308 cmd->resp[0] = sdricoh_readl(host, R20C_RESP);
309 }
310
311 /* transfer data */
312 if (data && cmd->error == 0) {
313 dev_dbg(dev, "transfer: blksz %i blocks %i sg_len %i "
314 "sg length %i\n", data->blksz, data->blocks,
315 data->sg_len, data->sg->length);
316
317 /* enter data reading mode */
318 sdricoh_writel(host, R21C_STATUS, 0x837f031e);
319 for (i = 0; i < data->blocks; i++) {
320 size_t len = data->blksz;
321 u8 *buf;
322 struct page *page;
323 int result;
324 page = sg_page(data->sg);
325
326 buf = kmap(page) + data->sg->offset + (len * i);
327 result =
328 sdricoh_blockio(host,
329 data->flags & MMC_DATA_READ, buf, len);
330 kunmap(page);
331 flush_dcache_page(page);
332 if (result) {
333 dev_err(dev, "sdricoh_request: cmd %i "
334 "block transfer failed\n", cmd->opcode);
335 cmd->error = result;
336 break;
337 } else
338 data->bytes_xfered += len;
339 }
340
341 sdricoh_writel(host, R208_DATAIO, 1);
342
343 if (sdricoh_query_status(host, STATUS_TRANSFER_FINISHED,
344 TRANSFER_TIMEOUT)) {
345 dev_err(dev, "sdricoh_request: transfer end error\n");
346 cmd->error = -EINVAL;
347 }
348 }
349 /* FIXME check busy flag */
350
351 mmc_request_done(mmc, mrq);
352 dev_dbg(dev, "=============================\n");
353}
354
355static void sdricoh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
356{
357 struct sdricoh_host *host = mmc_priv(mmc);
358 dev_dbg(host->dev, "set_ios\n");
359
360 if (ios->power_mode == MMC_POWER_ON) {
361 sdricoh_writel(host, R228_POWER, 0xc0e0);
362
363 if (ios->bus_width == MMC_BUS_WIDTH_4) {
364 sdricoh_writel(host, R224_MODE, 0x2000300);
365 sdricoh_writel(host, R228_POWER, 0x40e0);
366 } else {
367 sdricoh_writel(host, R224_MODE, 0x2000340);
368 }
369
370 } else if (ios->power_mode == MMC_POWER_UP) {
371 sdricoh_writel(host, R224_MODE, 0x2000320);
372 sdricoh_writel(host, R228_POWER, 0xe0);
373 }
374}
375
376static int sdricoh_get_ro(struct mmc_host *mmc)
377{
378 struct sdricoh_host *host = mmc_priv(mmc);
379 unsigned int status;
380
381 status = sdricoh_readl(host, R21C_STATUS);
382 sdricoh_writel(host, R2E4_STATUS_RESP, status);
383
384 /* some notebooks seem to have the locked flag switched */
385 if (switchlocked)
386 return !(status & STATUS_CARD_LOCKED);
387
388 return (status & STATUS_CARD_LOCKED);
389}
390
391static struct mmc_host_ops sdricoh_ops = {
392 .request = sdricoh_request,
393 .set_ios = sdricoh_set_ios,
394 .get_ro = sdricoh_get_ro,
395};
396
397/* initialize the control and register it to the mmc framework */
398static int sdricoh_init_mmc(struct pci_dev *pci_dev,
399 struct pcmcia_device *pcmcia_dev)
400{
401 int result = 0;
402 void __iomem *iobase = NULL;
403 struct mmc_host *mmc = NULL;
404 struct sdricoh_host *host = NULL;
405 struct device *dev = &pcmcia_dev->dev;
406 /* map iomem */
407 if (pci_resource_len(pci_dev, SDRICOH_PCI_REGION) !=
408 SDRICOH_PCI_REGION_SIZE) {
409 dev_dbg(dev, "unexpected pci resource len\n");
410 return -ENODEV;
411 }
412 iobase =
413 pci_iomap(pci_dev, SDRICOH_PCI_REGION, SDRICOH_PCI_REGION_SIZE);
414 if (!iobase) {
415 dev_err(dev, "unable to map iobase\n");
416 return -ENODEV;
417 }
418 /* check version? */
419 if (readl(iobase + R104_VERSION) != 0x4000) {
420 dev_dbg(dev, "no supported mmc controller found\n");
421 result = -ENODEV;
422 goto err;
423 }
424 /* allocate privdata */
425 mmc = pcmcia_dev->priv =
426 mmc_alloc_host(sizeof(struct sdricoh_host), &pcmcia_dev->dev);
427 if (!mmc) {
428 dev_err(dev, "mmc_alloc_host failed\n");
429 result = -ENOMEM;
430 goto err;
431 }
432 host = mmc_priv(mmc);
433
434 host->iobase = iobase;
435 host->dev = dev;
436 host->pci_dev = pci_dev;
437
438 mmc->ops = &sdricoh_ops;
439
440 /* FIXME: frequency and voltage handling is done by the controller
441 */
442 mmc->f_min = 450000;
443 mmc->f_max = 24000000;
444 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
445 mmc->caps |= MMC_CAP_4_BIT_DATA;
446
447 mmc->max_seg_size = 1024 * 512;
448 mmc->max_blk_size = 512;
449
450 /* reset the controler */
451 if (sdricoh_reset(host)) {
452 dev_dbg(dev, "could not reset\n");
453 result = -EIO;
454 goto err;
455
456 }
457
458 result = mmc_add_host(mmc);
459
460 if (!result) {
461 dev_dbg(dev, "mmc host registered\n");
462 return 0;
463 }
464
465err:
466 if (iobase)
467 iounmap(iobase);
468 if (mmc)
469 mmc_free_host(mmc);
470
471 return result;
472}
473
474/* search for supported mmc controllers */
475static int sdricoh_pcmcia_probe(struct pcmcia_device *pcmcia_dev)
476{
477 struct pci_dev *pci_dev = NULL;
478
479 dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device"
480 " %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]);
481
482 /* search pci cardbus bridge that contains the mmc controler */
483 /* the io region is already claimed by yenta_socket... */
484 while ((pci_dev =
485 pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
486 pci_dev))) {
487 /* try to init the device */
488 if (!sdricoh_init_mmc(pci_dev, pcmcia_dev)) {
489 dev_info(&pcmcia_dev->dev, "MMC controller found\n");
490 return 0;
491 }
492
493 }
494 dev_err(&pcmcia_dev->dev, "No MMC controller was found.\n");
495 return -ENODEV;
496}
497
498static void sdricoh_pcmcia_detach(struct pcmcia_device *link)
499{
500 struct mmc_host *mmc = link->priv;
501
502 dev_dbg(&link->dev, "detach\n");
503
504 /* remove mmc host */
505 if (mmc) {
506 struct sdricoh_host *host = mmc_priv(mmc);
507 mmc_remove_host(mmc);
508 pci_iounmap(host->pci_dev, host->iobase);
509 pci_dev_put(host->pci_dev);
510 mmc_free_host(mmc);
511 }
512 pcmcia_disable_device(link);
513
514}
515
516#ifdef CONFIG_PM
517static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)
518{
519 struct mmc_host *mmc = link->priv;
520 dev_dbg(&link->dev, "suspend\n");
521 mmc_suspend_host(mmc, PMSG_SUSPEND);
522 return 0;
523}
524
525static int sdricoh_pcmcia_resume(struct pcmcia_device *link)
526{
527 struct mmc_host *mmc = link->priv;
528 dev_dbg(&link->dev, "resume\n");
529 sdricoh_reset(mmc_priv(mmc));
530 mmc_resume_host(mmc);
531 return 0;
532}
533#else
534#define sdricoh_pcmcia_suspend NULL
535#define sdricoh_pcmcia_resume NULL
536#endif
537
538static struct pcmcia_driver sdricoh_driver = {
539 .drv = {
540 .name = DRIVER_NAME,
541 },
542 .probe = sdricoh_pcmcia_probe,
543 .remove = sdricoh_pcmcia_detach,
544 .id_table = pcmcia_ids,
545 .suspend = sdricoh_pcmcia_suspend,
546 .resume = sdricoh_pcmcia_resume,
547};
548
549/*****************************************************************************\
550 * *
551 * Driver init/exit *
552 * *
553\*****************************************************************************/
554
555static int __init sdricoh_drv_init(void)
556{
557 return pcmcia_register_driver(&sdricoh_driver);
558}
559
560static void __exit sdricoh_drv_exit(void)
561{
562 pcmcia_unregister_driver(&sdricoh_driver);
563}
564
565module_init(sdricoh_drv_init);
566module_exit(sdricoh_drv_exit);
567
568module_param(switchlocked, uint, 0444);
569
570MODULE_AUTHOR("Sascha Sommer <saschasommer@freenet.de>");
571MODULE_DESCRIPTION("Ricoh PCMCIA Secure Digital Interface driver");
572MODULE_LICENSE("GPL");
573
574MODULE_PARM_DESC(switchlocked, "Switch the cards locked status."
575 "Use this when unlocked cards are shown readonly (default 0)");
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 1c14a186f000..13844843e8de 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -973,7 +973,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
973 973
974 mmc->ops = &tifm_sd_ops; 974 mmc->ops = &tifm_sd_ops;
975 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 975 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
976 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE; 976 mmc->caps = MMC_CAP_4_BIT_DATA;
977 mmc->f_min = 20000000 / 60; 977 mmc->f_min = 20000000 / 60;
978 mmc->f_max = 24000000; 978 mmc->f_max = 24000000;
979 979
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index c303e7f57ab4..adda37952032 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -68,16 +68,16 @@ static const int unlock_codes[] = { 0x83, 0x87 };
68 68
69static const int valid_ids[] = { 69static const int valid_ids[] = {
70 0x7112, 70 0x7112,
71 }; 71};
72 72
73#ifdef CONFIG_PNP 73#ifdef CONFIG_PNP
74static unsigned int nopnp = 0; 74static unsigned int param_nopnp = 0;
75#else 75#else
76static const unsigned int nopnp = 1; 76static const unsigned int param_nopnp = 1;
77#endif 77#endif
78static unsigned int io = 0x248; 78static unsigned int param_io = 0x248;
79static unsigned int irq = 6; 79static unsigned int param_irq = 6;
80static int dma = 2; 80static int param_dma = 2;
81 81
82/* 82/*
83 * Basic functions 83 * Basic functions
@@ -939,7 +939,7 @@ static int wbsd_get_ro(struct mmc_host *mmc)
939 939
940 spin_unlock_bh(&host->lock); 940 spin_unlock_bh(&host->lock);
941 941
942 return csr & WBSD_WRPT; 942 return !!(csr & WBSD_WRPT);
943} 943}
944 944
945static const struct mmc_host_ops wbsd_ops = { 945static const struct mmc_host_ops wbsd_ops = {
@@ -1219,7 +1219,7 @@ static int __devinit wbsd_alloc_mmc(struct device *dev)
1219 mmc->f_min = 375000; 1219 mmc->f_min = 375000;
1220 mmc->f_max = 24000000; 1220 mmc->f_max = 24000000;
1221 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1221 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1222 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE; 1222 mmc->caps = MMC_CAP_4_BIT_DATA;
1223 1223
1224 spin_lock_init(&host->lock); 1224 spin_lock_init(&host->lock);
1225 1225
@@ -1420,7 +1420,7 @@ kfree:
1420 1420
1421 dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, 1421 dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
1422 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); 1422 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1423 host->dma_addr = (dma_addr_t)NULL; 1423 host->dma_addr = 0;
1424 1424
1425 kfree(host->dma_buffer); 1425 kfree(host->dma_buffer);
1426 host->dma_buffer = NULL; 1426 host->dma_buffer = NULL;
@@ -1445,7 +1445,7 @@ static void wbsd_release_dma(struct wbsd_host *host)
1445 1445
1446 host->dma = -1; 1446 host->dma = -1;
1447 host->dma_buffer = NULL; 1447 host->dma_buffer = NULL;
1448 host->dma_addr = (dma_addr_t)NULL; 1448 host->dma_addr = 0;
1449} 1449}
1450 1450
1451/* 1451/*
@@ -1765,7 +1765,7 @@ static void __devexit wbsd_shutdown(struct device *dev, int pnp)
1765static int __devinit wbsd_probe(struct platform_device *dev) 1765static int __devinit wbsd_probe(struct platform_device *dev)
1766{ 1766{
1767 /* Use the module parameters for resources */ 1767 /* Use the module parameters for resources */
1768 return wbsd_init(&dev->dev, io, irq, dma, 0); 1768 return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0);
1769} 1769}
1770 1770
1771static int __devexit wbsd_remove(struct platform_device *dev) 1771static int __devexit wbsd_remove(struct platform_device *dev)
@@ -1979,14 +1979,14 @@ static int __init wbsd_drv_init(void)
1979 1979
1980#ifdef CONFIG_PNP 1980#ifdef CONFIG_PNP
1981 1981
1982 if (!nopnp) { 1982 if (!param_nopnp) {
1983 result = pnp_register_driver(&wbsd_pnp_driver); 1983 result = pnp_register_driver(&wbsd_pnp_driver);
1984 if (result < 0) 1984 if (result < 0)
1985 return result; 1985 return result;
1986 } 1986 }
1987#endif /* CONFIG_PNP */ 1987#endif /* CONFIG_PNP */
1988 1988
1989 if (nopnp) { 1989 if (param_nopnp) {
1990 result = platform_driver_register(&wbsd_driver); 1990 result = platform_driver_register(&wbsd_driver);
1991 if (result < 0) 1991 if (result < 0)
1992 return result; 1992 return result;
@@ -2012,12 +2012,12 @@ static void __exit wbsd_drv_exit(void)
2012{ 2012{
2013#ifdef CONFIG_PNP 2013#ifdef CONFIG_PNP
2014 2014
2015 if (!nopnp) 2015 if (!param_nopnp)
2016 pnp_unregister_driver(&wbsd_pnp_driver); 2016 pnp_unregister_driver(&wbsd_pnp_driver);
2017 2017
2018#endif /* CONFIG_PNP */ 2018#endif /* CONFIG_PNP */
2019 2019
2020 if (nopnp) { 2020 if (param_nopnp) {
2021 platform_device_unregister(wbsd_device); 2021 platform_device_unregister(wbsd_device);
2022 2022
2023 platform_driver_unregister(&wbsd_driver); 2023 platform_driver_unregister(&wbsd_driver);
@@ -2029,11 +2029,11 @@ static void __exit wbsd_drv_exit(void)
2029module_init(wbsd_drv_init); 2029module_init(wbsd_drv_init);
2030module_exit(wbsd_drv_exit); 2030module_exit(wbsd_drv_exit);
2031#ifdef CONFIG_PNP 2031#ifdef CONFIG_PNP
2032module_param(nopnp, uint, 0444); 2032module_param_named(nopnp, param_nopnp, uint, 0444);
2033#endif 2033#endif
2034module_param(io, uint, 0444); 2034module_param_named(io, param_io, uint, 0444);
2035module_param(irq, uint, 0444); 2035module_param_named(irq, param_irq, uint, 0444);
2036module_param(dma, int, 0444); 2036module_param_named(dma, param_dma, int, 0444);
2037 2037
2038MODULE_LICENSE("GPL"); 2038MODULE_LICENSE("GPL");
2039MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); 2039MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 17bc87a43ff4..d2fbc2964523 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -258,13 +258,6 @@ config MTD_ALCHEMY
258 help 258 help
259 Flash memory access on AMD Alchemy Pb/Db/RDK Reference Boards 259 Flash memory access on AMD Alchemy Pb/Db/RDK Reference Boards
260 260
261config MTD_MTX1
262 tristate "4G Systems MTX-1 Flash device"
263 depends on MIPS_MTX1 && MTD_CFI
264 help
265 Flash memory access on 4G Systems MTX-1 Board. If you have one of
266 these boards and would like to use the flash chips on it, say 'Y'.
267
268config MTD_DILNETPC 261config MTD_DILNETPC
269 tristate "CFI Flash device mapped on DIL/Net PC" 262 tristate "CFI Flash device mapped on DIL/Net PC"
270 depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT 263 depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 957fb5f70f5e..c6ce8673dab2 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -65,5 +65,4 @@ obj-$(CONFIG_MTD_DMV182) += dmv182.o
65obj-$(CONFIG_MTD_SHARP_SL) += sharpsl-flash.o 65obj-$(CONFIG_MTD_SHARP_SL) += sharpsl-flash.o
66obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o 66obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
67obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o 67obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o
68obj-$(CONFIG_MTD_MTX1) += mtx-1_flash.o
69obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o 68obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
diff --git a/drivers/mtd/maps/mtx-1_flash.c b/drivers/mtd/maps/mtx-1_flash.c
deleted file mode 100644
index 2a8fde9b92f0..000000000000
--- a/drivers/mtd/maps/mtx-1_flash.c
+++ /dev/null
@@ -1,95 +0,0 @@
1/*
2 * Flash memory access on 4G Systems MTX-1 boards
3 *
4 * $Id: mtx-1_flash.c,v 1.2 2005/11/07 11:14:27 gleixner Exp $
5 *
6 * (C) 2005 Bruno Randolf <bruno.randolf@4g-systems.biz>
7 * (C) 2005 Joern Engel <joern@wohnheim.fh-wedel.de>
8 *
9 */
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h>
19
20#include <asm/io.h>
21
22static struct map_info mtx1_map = {
23 .name = "MTX-1 flash",
24 .bankwidth = 4,
25 .size = 0x2000000,
26 .phys = 0x1E000000,
27};
28
29static struct mtd_partition mtx1_partitions[] = {
30 {
31 .name = "filesystem",
32 .size = 0x01C00000,
33 .offset = 0,
34 },{
35 .name = "yamon",
36 .size = 0x00100000,
37 .offset = MTDPART_OFS_APPEND,
38 .mask_flags = MTD_WRITEABLE,
39 },{
40 .name = "kernel",
41 .size = 0x002c0000,
42 .offset = MTDPART_OFS_APPEND,
43 },{
44 .name = "yamon env",
45 .size = 0x00040000,
46 .offset = MTDPART_OFS_APPEND,
47 }
48};
49
50static struct mtd_info *mtx1_mtd;
51
52int __init mtx1_mtd_init(void)
53{
54 int ret = -ENXIO;
55
56 simple_map_init(&mtx1_map);
57
58 mtx1_map.virt = ioremap(mtx1_map.phys, mtx1_map.size);
59 if (!mtx1_map.virt)
60 return -EIO;
61
62 mtx1_mtd = do_map_probe("cfi_probe", &mtx1_map);
63 if (!mtx1_mtd)
64 goto err;
65
66 mtx1_mtd->owner = THIS_MODULE;
67
68 ret = add_mtd_partitions(mtx1_mtd, mtx1_partitions,
69 ARRAY_SIZE(mtx1_partitions));
70 if (ret)
71 goto err;
72
73 return 0;
74
75err:
76 iounmap(mtx1_map.virt);
77 return ret;
78}
79
80static void __exit mtx1_mtd_cleanup(void)
81{
82 if (mtx1_mtd) {
83 del_mtd_partitions(mtx1_mtd);
84 map_destroy(mtx1_mtd);
85 }
86 if (mtx1_map.virt)
87 iounmap(mtx1_map.virt);
88}
89
90module_init(mtx1_mtd_init);
91module_exit(mtx1_mtd_cleanup);
92
93MODULE_AUTHOR("Bruno Randolf <bruno.randolf@4g-systems.biz>");
94MODULE_DESCRIPTION("MTX-1 flash map");
95MODULE_LICENSE("GPL");
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 45a41b597da9..2683ee32fc11 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1884,7 +1884,6 @@ config NE_H8300
1884 Say Y here if you want to use the NE2000 compatible 1884 Say Y here if you want to use the NE2000 compatible
1885 controller on the Renesas H8/300 processor. 1885 controller on the Renesas H8/300 processor.
1886 1886
1887source "drivers/net/fec_8xx/Kconfig"
1888source "drivers/net/fs_enet/Kconfig" 1887source "drivers/net/fs_enet/Kconfig"
1889 1888
1890endif # NET_ETHERNET 1889endif # NET_ETHERNET
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index dcbfe8421154..9010e58da0f2 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -217,7 +217,6 @@ obj-$(CONFIG_SMC91X) += smc91x.o
217obj-$(CONFIG_SMC911X) += smc911x.o 217obj-$(CONFIG_SMC911X) += smc911x.o
218obj-$(CONFIG_BFIN_MAC) += bfin_mac.o 218obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
219obj-$(CONFIG_DM9000) += dm9000.o 219obj-$(CONFIG_DM9000) += dm9000.o
220obj-$(CONFIG_FEC_8XX) += fec_8xx/
221obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o 220obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
222pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o 221pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
223obj-$(CONFIG_MLX4_CORE) += mlx4/ 222obj-$(CONFIG_MLX4_CORE) += mlx4/
diff --git a/drivers/net/fec_8xx/Kconfig b/drivers/net/fec_8xx/Kconfig
deleted file mode 100644
index afb34ded26ee..000000000000
--- a/drivers/net/fec_8xx/Kconfig
+++ /dev/null
@@ -1,20 +0,0 @@
1config FEC_8XX
2 tristate "Motorola 8xx FEC driver"
3 depends on 8XX
4 select MII
5
6config FEC_8XX_GENERIC_PHY
7 bool "Support any generic PHY"
8 depends on FEC_8XX
9 default y
10
11config FEC_8XX_DM9161_PHY
12 bool "Support DM9161 PHY"
13 depends on FEC_8XX
14 default n
15
16config FEC_8XX_LXT971_PHY
17 bool "Support LXT971/LXT972 PHY"
18 depends on FEC_8XX
19 default n
20
diff --git a/drivers/net/fec_8xx/Makefile b/drivers/net/fec_8xx/Makefile
deleted file mode 100644
index 70c54f8c48e5..000000000000
--- a/drivers/net/fec_8xx/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
1#
2# Makefile for the Motorola 8xx FEC ethernet controller
3#
4
5obj-$(CONFIG_FEC_8XX) += fec_8xx.o
6
7fec_8xx-objs := fec_main.o fec_mii.o
8
9# the platform instantatiation objects
10ifeq ($(CONFIG_NETTA),y)
11fec_8xx-objs += fec_8xx-netta.o
12endif
diff --git a/drivers/net/fec_8xx/fec_8xx-netta.c b/drivers/net/fec_8xx/fec_8xx-netta.c
deleted file mode 100644
index 79deee222e28..000000000000
--- a/drivers/net/fec_8xx/fec_8xx-netta.c
+++ /dev/null
@@ -1,151 +0,0 @@
1/*
2 * FEC instantatiation file for NETTA
3 */
4
5#include <linux/kernel.h>
6#include <linux/types.h>
7#include <linux/string.h>
8#include <linux/ptrace.h>
9#include <linux/errno.h>
10#include <linux/ioport.h>
11#include <linux/slab.h>
12#include <linux/interrupt.h>
13#include <linux/pci.h>
14#include <linux/init.h>
15#include <linux/delay.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/skbuff.h>
19#include <linux/spinlock.h>
20#include <linux/mii.h>
21#include <linux/ethtool.h>
22#include <linux/bitops.h>
23
24#include <asm/8xx_immap.h>
25#include <asm/pgtable.h>
26#include <asm/mpc8xx.h>
27#include <asm/irq.h>
28#include <asm/uaccess.h>
29#include <asm/cpm1.h>
30
31#include "fec_8xx.h"
32
33/*************************************************/
34
35static struct fec_platform_info fec1_info = {
36 .fec_no = 0,
37 .use_mdio = 1,
38 .phy_addr = 8,
39 .fec_irq = SIU_LEVEL1,
40 .phy_irq = CPM_IRQ_OFFSET + CPMVEC_PIO_PC6,
41 .rx_ring = 128,
42 .tx_ring = 16,
43 .rx_copybreak = 240,
44 .use_napi = 1,
45 .napi_weight = 17,
46};
47
48static struct fec_platform_info fec2_info = {
49 .fec_no = 1,
50 .use_mdio = 1,
51 .phy_addr = 2,
52 .fec_irq = SIU_LEVEL3,
53 .phy_irq = CPM_IRQ_OFFSET + CPMVEC_PIO_PC7,
54 .rx_ring = 128,
55 .tx_ring = 16,
56 .rx_copybreak = 240,
57 .use_napi = 1,
58 .napi_weight = 17,
59};
60
61static struct net_device *fec1_dev;
62static struct net_device *fec2_dev;
63
64/* XXX custom u-boot & Linux startup needed */
65extern const char *__fw_getenv(const char *var);
66
67/* access ports */
68#define setbits32(_addr, _v) __fec_out32(&(_addr), __fec_in32(&(_addr)) | (_v))
69#define clrbits32(_addr, _v) __fec_out32(&(_addr), __fec_in32(&(_addr)) & ~(_v))
70
71#define setbits16(_addr, _v) __fec_out16(&(_addr), __fec_in16(&(_addr)) | (_v))
72#define clrbits16(_addr, _v) __fec_out16(&(_addr), __fec_in16(&(_addr)) & ~(_v))
73
74int fec_8xx_platform_init(void)
75{
76 immap_t *immap = (immap_t *)IMAP_ADDR;
77 bd_t *bd = (bd_t *) __res;
78 const char *s;
79 char *e;
80 int i;
81
82 /* use MDC for MII */
83 setbits16(immap->im_ioport.iop_pdpar, 0x0080);
84 clrbits16(immap->im_ioport.iop_pddir, 0x0080);
85
86 /* configure FEC1 pins */
87 setbits16(immap->im_ioport.iop_papar, 0xe810);
88 setbits16(immap->im_ioport.iop_padir, 0x0810);
89 clrbits16(immap->im_ioport.iop_padir, 0xe000);
90
91 setbits32(immap->im_cpm.cp_pbpar, 0x00000001);
92 clrbits32(immap->im_cpm.cp_pbdir, 0x00000001);
93
94 setbits32(immap->im_cpm.cp_cptr, 0x00000100);
95 clrbits32(immap->im_cpm.cp_cptr, 0x00000050);
96
97 clrbits16(immap->im_ioport.iop_pcpar, 0x0200);
98 clrbits16(immap->im_ioport.iop_pcdir, 0x0200);
99 clrbits16(immap->im_ioport.iop_pcso, 0x0200);
100 setbits16(immap->im_ioport.iop_pcint, 0x0200);
101
102 /* configure FEC2 pins */
103 setbits32(immap->im_cpm.cp_pepar, 0x00039620);
104 setbits32(immap->im_cpm.cp_pedir, 0x00039620);
105 setbits32(immap->im_cpm.cp_peso, 0x00031000);
106 clrbits32(immap->im_cpm.cp_peso, 0x00008620);
107
108 setbits32(immap->im_cpm.cp_cptr, 0x00000080);
109 clrbits32(immap->im_cpm.cp_cptr, 0x00000028);
110
111 clrbits16(immap->im_ioport.iop_pcpar, 0x0200);
112 clrbits16(immap->im_ioport.iop_pcdir, 0x0200);
113 clrbits16(immap->im_ioport.iop_pcso, 0x0200);
114 setbits16(immap->im_ioport.iop_pcint, 0x0200);
115
116 /* fill up */
117 fec1_info.sys_clk = bd->bi_intfreq;
118 fec2_info.sys_clk = bd->bi_intfreq;
119
120 s = __fw_getenv("ethaddr");
121 if (s != NULL) {
122 for (i = 0; i < 6; i++) {
123 fec1_info.macaddr[i] = simple_strtoul(s, &e, 16);
124 if (*e)
125 s = e + 1;
126 }
127 }
128
129 s = __fw_getenv("eth1addr");
130 if (s != NULL) {
131 for (i = 0; i < 6; i++) {
132 fec2_info.macaddr[i] = simple_strtoul(s, &e, 16);
133 if (*e)
134 s = e + 1;
135 }
136 }
137
138 fec_8xx_init_one(&fec1_info, &fec1_dev);
139 fec_8xx_init_one(&fec2_info, &fec2_dev);
140
141 return fec1_dev != NULL && fec2_dev != NULL ? 0 : -1;
142}
143
144void fec_8xx_platform_cleanup(void)
145{
146 if (fec2_dev != NULL)
147 fec_8xx_cleanup_one(fec2_dev);
148
149 if (fec1_dev != NULL)
150 fec_8xx_cleanup_one(fec1_dev);
151}
diff --git a/drivers/net/fec_8xx/fec_8xx.h b/drivers/net/fec_8xx/fec_8xx.h
deleted file mode 100644
index f3b1c6fbba8b..000000000000
--- a/drivers/net/fec_8xx/fec_8xx.h
+++ /dev/null
@@ -1,220 +0,0 @@
1#ifndef FEC_8XX_H
2#define FEC_8XX_H
3
4#include <linux/mii.h>
5#include <linux/netdevice.h>
6
7#include <linux/types.h>
8
9/* HW info */
10
11/* CRC polynomium used by the FEC for the multicast group filtering */
12#define FEC_CRC_POLY 0x04C11DB7
13
14#define MII_ADVERTISE_HALF (ADVERTISE_100HALF | \
15 ADVERTISE_10HALF | ADVERTISE_CSMA)
16#define MII_ADVERTISE_ALL (ADVERTISE_100FULL | \
17 ADVERTISE_10FULL | MII_ADVERTISE_HALF)
18
19/* Interrupt events/masks.
20*/
21#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
22#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
23#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
24#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
25#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
26#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
27#define FEC_ENET_RXF 0x02000000U /* Full frame received */
28#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
29#define FEC_ENET_MII 0x00800000U /* MII interrupt */
30#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
31
32#define FEC_ECNTRL_PINMUX 0x00000004
33#define FEC_ECNTRL_ETHER_EN 0x00000002
34#define FEC_ECNTRL_RESET 0x00000001
35
36#define FEC_RCNTRL_BC_REJ 0x00000010
37#define FEC_RCNTRL_PROM 0x00000008
38#define FEC_RCNTRL_MII_MODE 0x00000004
39#define FEC_RCNTRL_DRT 0x00000002
40#define FEC_RCNTRL_LOOP 0x00000001
41
42#define FEC_TCNTRL_FDEN 0x00000004
43#define FEC_TCNTRL_HBC 0x00000002
44#define FEC_TCNTRL_GTS 0x00000001
45
46/* values for MII phy_status */
47
48#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */
49#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */
50#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */
51#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */
52#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */
53#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */
54#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */
55
56#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */
57#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */
58#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */
59#define PHY_STAT_SPMASK 0xf000 /* mask for speed */
60#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */
61#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */
62#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */
63#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */
64
65typedef struct phy_info {
66 unsigned int id;
67 const char *name;
68 void (*startup) (struct net_device * dev);
69 void (*shutdown) (struct net_device * dev);
70 void (*ack_int) (struct net_device * dev);
71} phy_info_t;
72
73/* The FEC stores dest/src/type, data, and checksum for receive packets.
74 */
75#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */
76#define MIN_MTU 46 /* this is data size */
77#define CRC_LEN 4
78
79#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN)
80#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN)
81
82/* Must be a multiple of 4 */
83#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE+3) & ~3)
84/* This is needed so that invalidate_xxx wont invalidate too much */
85#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE)
86
87/* platform interface */
88
89struct fec_platform_info {
90 int fec_no; /* FEC index */
91 int use_mdio; /* use external MII */
92 int phy_addr; /* the phy address */
93 int fec_irq, phy_irq; /* the irq for the controller */
94 int rx_ring, tx_ring; /* number of buffers on rx */
95 int sys_clk; /* system clock */
96 __u8 macaddr[6]; /* mac address */
97 int rx_copybreak; /* limit we copy small frames */
98 int use_napi; /* use NAPI */
99 int napi_weight; /* NAPI weight */
100};
101
102/* forward declaration */
103struct fec;
104
105struct fec_enet_private {
106 spinlock_t lock; /* during all ops except TX pckt processing */
107 spinlock_t tx_lock; /* during fec_start_xmit and fec_tx */
108 struct net_device *dev;
109 struct napi_struct napi;
110 int fecno;
111 struct fec *fecp;
112 const struct fec_platform_info *fpi;
113 int rx_ring, tx_ring;
114 dma_addr_t ring_mem_addr;
115 void *ring_base;
116 struct sk_buff **rx_skbuff;
117 struct sk_buff **tx_skbuff;
118 cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
119 cbd_t *tx_bd_base;
120 cbd_t *dirty_tx; /* ring entries to be free()ed. */
121 cbd_t *cur_rx;
122 cbd_t *cur_tx;
123 int tx_free;
124 struct net_device_stats stats;
125 struct timer_list phy_timer_list;
126 const struct phy_info *phy;
127 unsigned int fec_phy_speed;
128 __u32 msg_enable;
129 struct mii_if_info mii_if;
130};
131
132/***************************************************************************/
133
134void fec_restart(struct net_device *dev, int duplex, int speed);
135void fec_stop(struct net_device *dev);
136
137/***************************************************************************/
138
139int fec_mii_read(struct net_device *dev, int phy_id, int location);
140void fec_mii_write(struct net_device *dev, int phy_id, int location, int value);
141
142int fec_mii_phy_id_detect(struct net_device *dev);
143void fec_mii_startup(struct net_device *dev);
144void fec_mii_shutdown(struct net_device *dev);
145void fec_mii_ack_int(struct net_device *dev);
146
147void fec_mii_link_status_change_check(struct net_device *dev, int init_media);
148
149/***************************************************************************/
150
151#define FEC1_NO 0x00
152#define FEC2_NO 0x01
153#define FEC3_NO 0x02
154
155int fec_8xx_init_one(const struct fec_platform_info *fpi,
156 struct net_device **devp);
157int fec_8xx_cleanup_one(struct net_device *dev);
158
159/***************************************************************************/
160
161#define DRV_MODULE_NAME "fec_8xx"
162#define PFX DRV_MODULE_NAME ": "
163#define DRV_MODULE_VERSION "0.1"
164#define DRV_MODULE_RELDATE "May 6, 2004"
165
166/***************************************************************************/
167
168int fec_8xx_platform_init(void);
169void fec_8xx_platform_cleanup(void);
170
171/***************************************************************************/
172
173/* FEC access macros */
174#if defined(CONFIG_8xx)
175/* for a 8xx __raw_xxx's are sufficient */
176#define __fec_out32(addr, x) __raw_writel(x, addr)
177#define __fec_out16(addr, x) __raw_writew(x, addr)
178#define __fec_in32(addr) __raw_readl(addr)
179#define __fec_in16(addr) __raw_readw(addr)
180#else
181/* for others play it safe */
182#define __fec_out32(addr, x) out_be32(addr, x)
183#define __fec_out16(addr, x) out_be16(addr, x)
184#define __fec_in32(addr) in_be32(addr)
185#define __fec_in16(addr) in_be16(addr)
186#endif
187
188/* write */
189#define FW(_fecp, _reg, _v) __fec_out32(&(_fecp)->fec_ ## _reg, (_v))
190
191/* read */
192#define FR(_fecp, _reg) __fec_in32(&(_fecp)->fec_ ## _reg)
193
194/* set bits */
195#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v))
196
197/* clear bits */
198#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
199
200/* buffer descriptor access macros */
201
202/* write */
203#define CBDW_SC(_cbd, _sc) __fec_out16(&(_cbd)->cbd_sc, (_sc))
204#define CBDW_DATLEN(_cbd, _datlen) __fec_out16(&(_cbd)->cbd_datlen, (_datlen))
205#define CBDW_BUFADDR(_cbd, _bufaddr) __fec_out32(&(_cbd)->cbd_bufaddr, (_bufaddr))
206
207/* read */
208#define CBDR_SC(_cbd) __fec_in16(&(_cbd)->cbd_sc)
209#define CBDR_DATLEN(_cbd) __fec_in16(&(_cbd)->cbd_datlen)
210#define CBDR_BUFADDR(_cbd) __fec_in32(&(_cbd)->cbd_bufaddr)
211
212/* set bits */
213#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc))
214
215/* clear bits */
216#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc))
217
218/***************************************************************************/
219
220#endif
diff --git a/drivers/net/fec_8xx/fec_main.c b/drivers/net/fec_8xx/fec_main.c
deleted file mode 100644
index ca8d2e83ab03..000000000000
--- a/drivers/net/fec_8xx/fec_main.c
+++ /dev/null
@@ -1,1264 +0,0 @@
1/*
2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
8 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
9 *
10 * Released under the GPL
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/string.h>
17#include <linux/ptrace.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/interrupt.h>
22#include <linux/init.h>
23#include <linux/delay.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/skbuff.h>
27#include <linux/spinlock.h>
28#include <linux/mii.h>
29#include <linux/ethtool.h>
30#include <linux/bitops.h>
31#include <linux/dma-mapping.h>
32
33#include <asm/8xx_immap.h>
34#include <asm/pgtable.h>
35#include <asm/mpc8xx.h>
36#include <asm/irq.h>
37#include <asm/uaccess.h>
38#include <asm/cpm1.h>
39
40#include "fec_8xx.h"
41
42/*************************************************/
43
44#define FEC_MAX_MULTICAST_ADDRS 64
45
46/*************************************************/
47
48static char version[] __devinitdata =
49 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";
50
51MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
52MODULE_DESCRIPTION("Motorola 8xx FEC ethernet driver");
53MODULE_LICENSE("GPL");
54
55int fec_8xx_debug = -1; /* -1 == use FEC_8XX_DEF_MSG_ENABLE as value */
56module_param(fec_8xx_debug, int, 0);
57MODULE_PARM_DESC(fec_8xx_debug,
58 "FEC 8xx bitmapped debugging message enable value");
59
60
61/*************************************************/
62
63/*
64 * Delay to wait for FEC reset command to complete (in us)
65 */
66#define FEC_RESET_DELAY 50
67
68/*****************************************************************************************/
69
70static void fec_whack_reset(fec_t * fecp)
71{
72 int i;
73
74 /*
75 * Whack a reset. We should wait for this.
76 */
77 FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
78 for (i = 0;
79 (FR(fecp, ecntrl) & FEC_ECNTRL_RESET) != 0 && i < FEC_RESET_DELAY;
80 i++)
81 udelay(1);
82
83 if (i == FEC_RESET_DELAY)
84 printk(KERN_WARNING "FEC Reset timeout!\n");
85
86}
87
88/****************************************************************************/
89
90/*
91 * Transmitter timeout.
92 */
93#define TX_TIMEOUT (2*HZ)
94
95/****************************************************************************/
96
97/*
98 * Returns the CRC needed when filling in the hash table for
99 * multicast group filtering
100 * pAddr must point to a MAC address (6 bytes)
101 */
102static __u32 fec_mulicast_calc_crc(char *pAddr)
103{
104 u8 byte;
105 int byte_count;
106 int bit_count;
107 __u32 crc = 0xffffffff;
108 u8 msb;
109
110 for (byte_count = 0; byte_count < 6; byte_count++) {
111 byte = pAddr[byte_count];
112 for (bit_count = 0; bit_count < 8; bit_count++) {
113 msb = crc >> 31;
114 crc <<= 1;
115 if (msb ^ (byte & 0x1)) {
116 crc ^= FEC_CRC_POLY;
117 }
118 byte >>= 1;
119 }
120 }
121 return (crc);
122}
123
124/*
125 * Set or clear the multicast filter for this adaptor.
126 * Skeleton taken from sunlance driver.
127 * The CPM Ethernet implementation allows Multicast as well as individual
128 * MAC address filtering. Some of the drivers check to make sure it is
129 * a group multicast address, and discard those that are not. I guess I
130 * will do the same for now, but just remove the test if you want
131 * individual filtering as well (do the upper net layers want or support
132 * this kind of feature?).
133 */
134static void fec_set_multicast_list(struct net_device *dev)
135{
136 struct fec_enet_private *fep = netdev_priv(dev);
137 fec_t *fecp = fep->fecp;
138 struct dev_mc_list *pmc;
139 __u32 crc;
140 int temp;
141 __u32 csrVal;
142 int hash_index;
143 __u32 hthi, htlo;
144 unsigned long flags;
145
146
147 if ((dev->flags & IFF_PROMISC) != 0) {
148
149 spin_lock_irqsave(&fep->lock, flags);
150 FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
151 spin_unlock_irqrestore(&fep->lock, flags);
152
153 /*
154 * Log any net taps.
155 */
156 printk(KERN_WARNING DRV_MODULE_NAME
157 ": %s: Promiscuous mode enabled.\n", dev->name);
158 return;
159
160 }
161
162 if ((dev->flags & IFF_ALLMULTI) != 0 ||
163 dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
164 /*
165 * Catch all multicast addresses, set the filter to all 1's.
166 */
167 hthi = 0xffffffffU;
168 htlo = 0xffffffffU;
169 } else {
170 hthi = 0;
171 htlo = 0;
172
173 /*
174 * Now populate the hash table
175 */
176 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next) {
177 crc = fec_mulicast_calc_crc(pmc->dmi_addr);
178 temp = (crc & 0x3f) >> 1;
179 hash_index = ((temp & 0x01) << 4) |
180 ((temp & 0x02) << 2) |
181 ((temp & 0x04)) |
182 ((temp & 0x08) >> 2) |
183 ((temp & 0x10) >> 4);
184 csrVal = (1 << hash_index);
185 if (crc & 1)
186 hthi |= csrVal;
187 else
188 htlo |= csrVal;
189 }
190 }
191
192 spin_lock_irqsave(&fep->lock, flags);
193 FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
194 FW(fecp, hash_table_high, hthi);
195 FW(fecp, hash_table_low, htlo);
196 spin_unlock_irqrestore(&fep->lock, flags);
197}
198
199static int fec_set_mac_address(struct net_device *dev, void *addr)
200{
201 struct sockaddr *mac = addr;
202 struct fec_enet_private *fep = netdev_priv(dev);
203 struct fec *fecp = fep->fecp;
204 int i;
205 __u32 addrhi, addrlo;
206 unsigned long flags;
207
208 /* Get pointer to SCC area in parameter RAM. */
209 for (i = 0; i < 6; i++)
210 dev->dev_addr[i] = mac->sa_data[i];
211
212 /*
213 * Set station address.
214 */
215 addrhi = ((__u32) dev->dev_addr[0] << 24) |
216 ((__u32) dev->dev_addr[1] << 16) |
217 ((__u32) dev->dev_addr[2] << 8) |
218 (__u32) dev->dev_addr[3];
219 addrlo = ((__u32) dev->dev_addr[4] << 24) |
220 ((__u32) dev->dev_addr[5] << 16);
221
222 spin_lock_irqsave(&fep->lock, flags);
223 FW(fecp, addr_low, addrhi);
224 FW(fecp, addr_high, addrlo);
225 spin_unlock_irqrestore(&fep->lock, flags);
226
227 return 0;
228}
229
/*
 * fec_restart - bring the FEC out of reset and reprogram it completely.
 *
 * Called to start or restart the FEC during a link change; this only
 * happens when switching between half and full duplex.  Resets the
 * controller, reprograms the station address, rebuilds both descriptor
 * rings (freeing any in-flight sk_buffs), adjusts speed/duplex related
 * registers and finally re-enables reception.
 *
 * @dev:    network device being (re)started
 * @duplex: non-zero selects full duplex, zero half duplex
 * @speed:  link speed in Mbit/s (10 or 100); only consulted on
 *          CONFIG_DUET boards running in RMII mode
 *
 * Callers serialize this with fep->lock (see fec_enet_open()).
 */
void fec_restart(struct net_device *dev, int duplex, int speed)
{
#ifdef CONFIG_DUET
	immap_t *immap = (immap_t *) IMAP_ADDR;
	__u32 cptr;
#endif
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec *fecp = fep->fecp;
	const struct fec_platform_info *fpi = fep->fpi;
	cbd_t *bdp;
	struct sk_buff *skb;
	int i;
	__u32 addrhi, addrlo;

	/* put the controller into a known state before touching anything */
	fec_whack_reset(fep->fecp);

	/*
	 * Set station address.
	 * addrhi carries MAC bytes 0-3, addrlo bytes 4-5.
	 * NOTE(review): addrhi goes to addr_low and addrlo to addr_high,
	 * same as the pre-reset path earlier in this file -- presumably the
	 * register naming is inverted w.r.t. byte significance; confirm
	 * against the MPC8xx FEC manual before "fixing".
	 */
	addrhi = ((__u32) dev->dev_addr[0] << 24) |
		 ((__u32) dev->dev_addr[1] << 16) |
		 ((__u32) dev->dev_addr[2] << 8) |
		 (__u32) dev->dev_addr[3];
	addrlo = ((__u32) dev->dev_addr[4] << 24) |
		 ((__u32) dev->dev_addr[5] << 16);
	FW(fecp, addr_low, addrhi);
	FW(fecp, addr_high, addrlo);

	/*
	 * Reset all multicast.
	 */
	FW(fecp, hash_table_high, 0);
	FW(fecp, hash_table_low, 0);

	/*
	 * Set maximum receive buffer size.
	 */
	FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
	FW(fecp, r_hash, PKT_MAXBUF_SIZE);

	/*
	 * Set receive and transmit descriptor base.
	 */
	FW(fecp, r_des_start, iopa((__u32) (fep->rx_bd_base)));
	FW(fecp, x_des_start, iopa((__u32) (fep->tx_bd_base)));

	/* software ring state: everything empty/free again */
	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Reset SKB receive buffers
	 */
	for (i = 0; i < fep->rx_ring; i++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;
		fep->rx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = dev_alloc_skb(ENET_RX_FRSIZE);
		if (skb == NULL) {
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s Memory squeeze, unable to allocate skb\n",
			       dev->name);
			fep->stats.rx_dropped++;
			break;
		}
		fep->rx_skbuff[i] = skb;
		skb->dev = dev;
		CBDW_BUFADDR(bdp, dma_map_single(NULL, skb->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		/* last descriptor also gets the WRAP bit */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}
	/*
	 * if we failed, fillup remainder: leave the slots without an skb
	 * NOT marked EMPTY so the hardware never DMAs into them
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0; i < fep->tx_ring; i++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;
		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, virt_to_bus(NULL));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * Enable big endian and don't care about SDMA FC.
	 */
	FW(fecp, fun_code, 0x78000000);

	/*
	 * Set MII speed.
	 */
	FW(fecp, mii_speed, fep->fec_phy_speed);

	/*
	 * Clear any outstanding interrupt.
	 */
	FW(fecp, ievent, 0xffc0);
	FW(fecp, ivec, (fpi->fec_irq / 2) << 29);

	/*
	 * adjust to speed (only for DUET & RMII)
	 */
#ifdef CONFIG_DUET
	cptr = in_be32(&immap->im_cpm.cp_cptr);
	switch (fpi->fec_no) {
	case 0:
		/*
		 * check if in RMII mode
		 */
		if ((cptr & 0x100) == 0)
			break;

		/* toggle the per-FEC 10/100 clock select bit */
		if (speed == 10)
			cptr |= 0x0000010;
		else if (speed == 100)
			cptr &= ~0x0000010;
		break;
	case 1:
		/*
		 * check if in RMII mode
		 */
		if ((cptr & 0x80) == 0)
			break;

		if (speed == 10)
			cptr |= 0x0000008;
		else if (speed == 100)
			cptr &= ~0x0000008;
		break;
	default:
		break;
	}
	out_be32(&immap->im_cpm.cp_cptr, cptr);
#endif

	FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);	/* MII enable */
	/*
	 * adjust to duplex mode
	 */
	if (duplex) {
		FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
		FS(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD enable */
	} else {
		FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
		FC(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD disable */
	}

	/*
	 * Enable interrupts we wish to service.
	 */
	FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
	   FEC_ENET_RXF | FEC_ENET_RXB);

	/*
	 * And last, enable the transmit and receive processing.
	 */
	FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
	FW(fecp, r_des_active, 0x01000000);
}
420
/*
 * fec_stop - halt the controller and reclaim all queued buffers.
 *
 * Requests a graceful transmit stop, busy-waits up to FEC_RESET_DELAY
 * microseconds for the controller to signal completion, disables the
 * FEC, then frees every sk_buff still attached to either ring.
 * Callers serialize this with fep->lock (see fec_enet_close()).
 */
void fec_stop(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	fec_t *fecp = fep->fecp;
	struct sk_buff *skb;
	int i;

	if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
		return;		/* already down */

	FW(fecp, x_cntrl, 0x01);	/* Graceful transmit stop */
	/* 0x10000000 in ievent -- presumably "graceful stop complete";
	 * confirm against the MPC8xx FEC manual */
	for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
	     i < FEC_RESET_DELAY; i++)
		udelay(1);

	if (i == FEC_RESET_DELAY)
		printk(KERN_WARNING DRV_MODULE_NAME
		       ": %s FEC timeout on graceful transmit stop\n",
		       dev->name);
	/*
	 * Disable FEC. Let only MII interrupts.
	 * NOTE(review): this writes the complement of ETHER_EN (all other
	 * bits set) rather than clearing just that bit -- kept as-is.
	 */
	FW(fecp, imask, 0);
	FW(fecp, ecntrl, ~FEC_ECNTRL_ETHER_EN);

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0; i < fep->tx_ring; i++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;
		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers
	 */
	for (i = 0; i < fep->rx_ring; i++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;
		fep->rx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}
}
466
467/* common receive function */
468static int fec_enet_rx_common(struct fec_enet_private *ep,
469 struct net_device *dev, int budget)
470{
471 fec_t *fecp = fep->fecp;
472 const struct fec_platform_info *fpi = fep->fpi;
473 cbd_t *bdp;
474 struct sk_buff *skb, *skbn, *skbt;
475 int received = 0;
476 __u16 pkt_len, sc;
477 int curidx;
478
479 /*
480 * First, grab all of the stats for the incoming packet.
481 * These get messed up if we get called due to a busy condition.
482 */
483 bdp = fep->cur_rx;
484
485 /* clear RX status bits for napi*/
486 if (fpi->use_napi)
487 FW(fecp, ievent, FEC_ENET_RXF | FEC_ENET_RXB);
488
489 while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
490
491 curidx = bdp - fep->rx_bd_base;
492
493 /*
494 * Since we have allocated space to hold a complete frame,
495 * the last indicator should be set.
496 */
497 if ((sc & BD_ENET_RX_LAST) == 0)
498 printk(KERN_WARNING DRV_MODULE_NAME
499 ": %s rcv is not +last\n",
500 dev->name);
501
502 /*
503 * Check for errors.
504 */
505 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
506 BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
507 fep->stats.rx_errors++;
508 /* Frame too long or too short. */
509 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
510 fep->stats.rx_length_errors++;
511 /* Frame alignment */
512 if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
513 fep->stats.rx_frame_errors++;
514 /* CRC Error */
515 if (sc & BD_ENET_RX_CR)
516 fep->stats.rx_crc_errors++;
517 /* FIFO overrun */
518 if (sc & BD_ENET_RX_OV)
519 fep->stats.rx_crc_errors++;
520
521 skbn = fep->rx_skbuff[curidx];
522 BUG_ON(skbn == NULL);
523
524 } else {
525 skb = fep->rx_skbuff[curidx];
526 BUG_ON(skb == NULL);
527
528 /*
529 * Process the incoming frame.
530 */
531 fep->stats.rx_packets++;
532 pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
533 fep->stats.rx_bytes += pkt_len + 4;
534
535 if (pkt_len <= fpi->rx_copybreak) {
536 /* +2 to make IP header L1 cache aligned */
537 skbn = dev_alloc_skb(pkt_len + 2);
538 if (skbn != NULL) {
539 skb_reserve(skbn, 2); /* align IP header */
540 skb_copy_from_linear_data(skb,
541 skbn->data,
542 pkt_len);
543 /* swap */
544 skbt = skb;
545 skb = skbn;
546 skbn = skbt;
547 }
548 } else
549 skbn = dev_alloc_skb(ENET_RX_FRSIZE);
550
551 if (skbn != NULL) {
552 skb_put(skb, pkt_len); /* Make room */
553 skb->protocol = eth_type_trans(skb, dev);
554 received++;
555 if (!fpi->use_napi)
556 netif_rx(skb);
557 else
558 netif_receive_skb(skb);
559 } else {
560 printk(KERN_WARNING DRV_MODULE_NAME
561 ": %s Memory squeeze, dropping packet.\n",
562 dev->name);
563 fep->stats.rx_dropped++;
564 skbn = skb;
565 }
566 }
567
568 fep->rx_skbuff[curidx] = skbn;
569 CBDW_BUFADDR(bdp, dma_map_single(NULL, skbn->data,
570 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
571 DMA_FROM_DEVICE));
572 CBDW_DATLEN(bdp, 0);
573 CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
574
575 /*
576 * Update BD pointer to next entry.
577 */
578 if ((sc & BD_ENET_RX_WRAP) == 0)
579 bdp++;
580 else
581 bdp = fep->rx_bd_base;
582
583 /*
584 * Doing this here will keep the FEC running while we process
585 * incoming frames. On a heavily loaded network, we should be
586 * able to keep up at the expense of system resources.
587 */
588 FW(fecp, r_des_active, 0x01000000);
589
590 if (received >= budget)
591 break;
592
593 }
594
595 fep->cur_rx = bdp;
596
597 if (fpi->use_napi) {
598 if (received < budget) {
599 netif_rx_complete(dev, &fep->napi);
600
601 /* enable RX interrupt bits */
602 FS(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB);
603 }
604 }
605
606 return received;
607}
608
/*
 * fec_enet_tx - reclaim completed transmit descriptors.
 *
 * Called from the FEC interrupt handler (hence plain spin_lock on
 * fep->lock -- interrupts are already disabled in that context).
 * Walks the ring from dirty_tx, accounting per-frame statistics,
 * freeing each transmitted sk_buff and growing tx_free; wakes the
 * netif queue once at least one slot has been freed.
 */
static void fec_enet_tx(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake;
	__u16 sc;

	spin_lock(&fep->lock);
	bdp = fep->dirty_tx;

	do_wake = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {

		dirtyidx = bdp - fep->tx_bd_base;

		/* every descriptor already reclaimed -- nothing pending */
		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
			fep->stats.tx_errors++;
			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;
		} else
			fep->stats.tx_packets++;

		/* NOTE(review): unreachable -- the loop condition above
		 * already guarantees TX_READY is clear here */
		if (sc & BD_ENET_TX_READY)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s HEY! Enet xmit interrupt and TX_READY.\n",
			       dev->name);

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	spin_unlock(&fep->lock);

	/* wake the stack only after dropping the lock */
	if (do_wake && netif_queue_stopped(dev))
		netif_wake_queue(dev);
}
690
/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 *
 * Loops while (ievent & imask) reports pending events, acking them as
 * it goes -- except in NAPI mode, where the RX bits are deliberately
 * left pending for the poll routine to ack.  Logs hardware error
 * events, dispatches RX work (inline or via NAPI scheduling) and TX
 * completions to fec_enet_tx().
 */
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fec_enet_private *fep;
	const struct fec_platform_info *fpi;
	fec_t *fecp;
	__u32 int_events;
	__u32 int_events_napi;

	if (unlikely(dev == NULL))
		return IRQ_NONE;

	fep = netdev_priv(dev);
	fecp = fep->fecp;
	fpi = fep->fpi;

	/*
	 * Get the interrupt events that caused us to be here.
	 */
	while ((int_events = FR(fecp, ievent) & FR(fecp, imask)) != 0) {

		if (!fpi->use_napi)
			FW(fecp, ievent, int_events);
		else {
			/* don't ack the RX bits: the NAPI poll does that */
			int_events_napi = int_events & ~(FEC_ENET_RXF | FEC_ENET_RXB);
			FW(fecp, ievent, int_events_napi);
		}

		/* heartbeat / babbling / bus error conditions */
		if ((int_events & (FEC_ENET_HBERR | FEC_ENET_BABR |
				   FEC_ENET_BABT | FEC_ENET_EBERR)) != 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s FEC ERROR(s) 0x%x\n",
			       dev->name, int_events);

		if ((int_events & FEC_ENET_RXF) != 0) {
			if (!fpi->use_napi)
				fec_enet_rx_common(fep, dev, ~0);
			else {
				if (netif_rx_schedule_prep(dev, &fep->napi)) {
					/* disable rx interrupts */
					FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB);
					__netif_rx_schedule(dev, &fep->napi);
				} else {
					printk(KERN_ERR DRV_MODULE_NAME
					       ": %s driver bug! interrupt while in poll!\n",
					       dev->name);
					FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB);
				}
			}
		}

		if ((int_events & FEC_ENET_TXF) != 0)
			fec_enet_tx(dev);
	}

	return IRQ_HANDLED;
}
753
/*
 * This interrupt occurs when the PHY detects a link change.
 * Acks the PHY interrupt and re-evaluates link status; returns
 * IRQ_NONE when there is nothing for us to do (no device, MDIO not in
 * use, or no PHY identified yet).
 */
static irqreturn_t
fec_mii_link_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fec_enet_private *fep;
	const struct fec_platform_info *fpi;

	if (unlikely(dev == NULL))
		return IRQ_NONE;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	/* without MDIO there is no PHY state to manage */
	if (!fpi->use_mdio)
		return IRQ_NONE;

	/*
	 * Acknowledge the interrupt if possible. If we have not
	 * found the PHY yet we can't process or acknowledge the
	 * interrupt now. Instead we ignore this interrupt for now,
	 * which we can do since it is edge triggered. It will be
	 * acknowledged later by fec_enet_open().
	 */
	if (!fep->phy)
		return IRQ_NONE;

	fec_mii_ack_int(dev);
	fec_mii_link_status_change_check(dev, 0);

	return IRQ_HANDLED;
}
786
787
788/**********************************************************************************/
789
/*
 * fec_enet_start_xmit - netdev ->hard_start_xmit hook.
 *
 * Claims the next TX descriptor under fep->tx_lock, attaches @skb,
 * maps it for DMA and kicks the transmitter.  Returns 0 on success;
 * returns 1 (busy) without consuming the skb if the ring is
 * unexpectedly full, so the stack will retry later.
 */
static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	fec_t *fecp = fep->fecp;
	cbd_t *bdp;
	int curidx;
	unsigned long flags;

	spin_lock_irqsave(&fep->tx_lock, flags);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->tx_lock, flags);

		/*
		 * Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		printk(KERN_WARNING DRV_MODULE_NAME
		       ": %s tx queue full!.\n", dev->name);
		return 1;
	}

	curidx = bdp - fep->tx_bd_base;
	/*
	 * Clear all of the status flags.
	 */
	CBDC_SC(bdp, BD_ENET_TX_STATS);

	/*
	 * Save skb pointer.
	 */
	fep->tx_skbuff[curidx] = skb;

	fep->stats.tx_bytes += skb->len;

	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(NULL, skb->data,
					 skb->len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, skb->len);

	dev->trans_start = jiffies;

	/*
	 * If this was the last BD in the ring, start at the beginning again.
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		fep->cur_tx++;
	else
		fep->cur_tx = fep->tx_bd_base;

	/* ring just became full: throttle the stack until TX completion */
	if (!--fep->tx_free)
		netif_stop_queue(dev);

	/*
	 * Trigger transmission start
	 */
	CBDS_SC(bdp, BD_ENET_TX_READY | BD_ENET_TX_INTR |
		BD_ENET_TX_LAST | BD_ENET_TX_TC);
	FW(fecp, x_des_active, 0x01000000);

	spin_unlock_irqrestore(&fep->tx_lock, flags);

	return 0;
}
862
863static void fec_timeout(struct net_device *dev)
864{
865 struct fec_enet_private *fep = netdev_priv(dev);
866
867 fep->stats.tx_errors++;
868
869 if (fep->tx_free)
870 netif_wake_queue(dev);
871
872 /* check link status again */
873 fec_mii_link_status_change_check(dev, 0);
874}
875
/*
 * fec_enet_open - netdev ->open hook.
 *
 * Enables NAPI, installs the FEC interrupt handler (and the optional
 * PHY link-change handler), then either starts MII link supervision
 * (use_mdio) or forces the link up at 100 Mbit full duplex.
 * Returns 0 on success or -EINVAL if an IRQ could not be claimed.
 */
static int fec_enet_open(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	const struct fec_platform_info *fpi = fep->fpi;
	unsigned long flags;

	napi_enable(&fep->napi);

	/* Install our interrupt handler. */
	if (request_irq(fpi->fec_irq, fec_enet_interrupt, 0, "fec", dev) != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s Could not allocate FEC IRQ!", dev->name);
		napi_disable(&fep->napi);
		return -EINVAL;
	}

	/* Install our phy interrupt handler */
	if (fpi->phy_irq != -1 &&
	    request_irq(fpi->phy_irq, fec_mii_link_interrupt, 0, "fec-phy",
			dev) != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s Could not allocate PHY IRQ!", dev->name);
		free_irq(fpi->fec_irq, dev);
		napi_disable(&fep->napi);
		return -EINVAL;
	}

	if (fpi->use_mdio) {
		/* the PHY will report the real link state asynchronously */
		fec_mii_startup(dev);
		netif_carrier_off(dev);
		fec_mii_link_status_change_check(dev, 1);
	} else {
		/* no MDIO: assume a fixed 100 Mbit full-duplex link */
		spin_lock_irqsave(&fep->lock, flags);
		fec_restart(dev, 1, 100);	/* XXX this sucks */
		spin_unlock_irqrestore(&fep->lock, flags);

		netif_carrier_on(dev);
		netif_start_queue(dev);
	}
	return 0;
}
917
/*
 * fec_enet_close - netdev ->stop hook.
 *
 * Quiesces the queue and NAPI, shuts down MII supervision if in use,
 * stops the controller under fep->lock and releases both IRQs.
 * Always returns 0.
 */
static int fec_enet_close(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	const struct fec_platform_info *fpi = fep->fpi;
	unsigned long flags;

	netif_stop_queue(dev);
	napi_disable(&fep->napi);
	netif_carrier_off(dev);

	if (fpi->use_mdio)
		fec_mii_shutdown(dev);

	spin_lock_irqsave(&fep->lock, flags);
	fec_stop(dev);
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	if (fpi->phy_irq != -1)
		free_irq(fpi->phy_irq, dev);
	free_irq(fpi->fec_irq, dev);

	return 0;
}
942
943static struct net_device_stats *fec_enet_get_stats(struct net_device *dev)
944{
945 struct fec_enet_private *fep = netdev_priv(dev);
946 return &fep->stats;
947}
948
949static int fec_enet_poll(struct napi_struct *napi, int budget)
950{
951 struct fec_enet_private *fep = container_of(napi, struct fec_enet_private, napi);
952 struct net_device *dev = fep->dev;
953
954 return fec_enet_rx_common(fep, dev, budget);
955}
956
957/*************************************************************************/
958
959static void fec_get_drvinfo(struct net_device *dev,
960 struct ethtool_drvinfo *info)
961{
962 strcpy(info->driver, DRV_MODULE_NAME);
963 strcpy(info->version, DRV_MODULE_VERSION);
964}
965
/* ethtool ->get_regs_len: a register dump covers the whole fec_t block. */
static int fec_get_regs_len(struct net_device *dev)
{
	return sizeof(fec_t);
}
970
/*
 * ethtool ->get_regs: snapshot the memory-mapped register file into @p.
 * Holds fep->lock so the copy cannot interleave with a restart/stop.
 */
static void fec_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			 void *p)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	/* the caller's buffer must hold the full register file */
	if (regs->len < sizeof(fec_t))
		return;

	regs->version = 0;
	spin_lock_irqsave(&fep->lock, flags);
	memcpy_fromio(p, fep->fecp, sizeof(fec_t));
	spin_unlock_irqrestore(&fep->lock, flags);
}
985
986static int fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
987{
988 struct fec_enet_private *fep = netdev_priv(dev);
989 unsigned long flags;
990 int rc;
991
992 spin_lock_irqsave(&fep->lock, flags);
993 rc = mii_ethtool_gset(&fep->mii_if, cmd);
994 spin_unlock_irqrestore(&fep->lock, flags);
995
996 return rc;
997}
998
999static int fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1000{
1001 struct fec_enet_private *fep = netdev_priv(dev);
1002 unsigned long flags;
1003 int rc;
1004
1005 spin_lock_irqsave(&fep->lock, flags);
1006 rc = mii_ethtool_sset(&fep->mii_if, cmd);
1007 spin_unlock_irqrestore(&fep->lock, flags);
1008
1009 return rc;
1010}
1011
1012static int fec_nway_reset(struct net_device *dev)
1013{
1014 struct fec_enet_private *fep = netdev_priv(dev);
1015 return mii_nway_restart(&fep->mii_if);
1016}
1017
1018static __u32 fec_get_msglevel(struct net_device *dev)
1019{
1020 struct fec_enet_private *fep = netdev_priv(dev);
1021 return fep->msg_enable;
1022}
1023
1024static void fec_set_msglevel(struct net_device *dev, __u32 value)
1025{
1026 struct fec_enet_private *fep = netdev_priv(dev);
1027 fep->msg_enable = value;
1028}
1029
/* ethtool entry points; anything not listed falls back to stack defaults. */
static const struct ethtool_ops fec_ethtool_ops = {
	.get_drvinfo = fec_get_drvinfo,
	.get_regs_len = fec_get_regs_len,
	.get_settings = fec_get_settings,
	.set_settings = fec_set_settings,
	.nway_reset = fec_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_msglevel = fec_get_msglevel,
	.set_msglevel = fec_set_msglevel,
	.set_tx_csum = ethtool_op_set_tx_csum,	/* local! */
	.set_sg = ethtool_op_set_sg,
	.get_regs = fec_get_regs,
};
1043
/*
 * fec_ioctl - netdev ->do_ioctl hook.
 *
 * Forwards MII ioctls to the generic MII helper under fep->lock.
 * Only valid while the interface is running.
 */
static int fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	/* ifr_data doubles as a struct mii_ioctl_data for MII ioctls
	 * (the same reinterpretation the if_mii() helper performs) */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
	unsigned long flags;
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irqsave(&fep->lock, flags);
	rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL);
	spin_unlock_irqrestore(&fep->lock, flags);
	return rc;
}
1059
1060int fec_8xx_init_one(const struct fec_platform_info *fpi,
1061 struct net_device **devp)
1062{
1063 immap_t *immap = (immap_t *) IMAP_ADDR;
1064 static int fec_8xx_version_printed = 0;
1065 struct net_device *dev = NULL;
1066 struct fec_enet_private *fep = NULL;
1067 fec_t *fecp = NULL;
1068 int i;
1069 int err = 0;
1070 int registered = 0;
1071 __u32 siel;
1072
1073 *devp = NULL;
1074
1075 switch (fpi->fec_no) {
1076 case 0:
1077 fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec;
1078 break;
1079#ifdef CONFIG_DUET
1080 case 1:
1081 fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec2;
1082 break;
1083#endif
1084 default:
1085 return -EINVAL;
1086 }
1087
1088 if (fec_8xx_version_printed++ == 0)
1089 printk(KERN_INFO "%s", version);
1090
1091 i = sizeof(*fep) + (sizeof(struct sk_buff **) *
1092 (fpi->rx_ring + fpi->tx_ring));
1093
1094 dev = alloc_etherdev(i);
1095 if (!dev) {
1096 err = -ENOMEM;
1097 goto err;
1098 }
1099
1100 fep = netdev_priv(dev);
1101 fep->dev = dev;
1102
1103 /* partial reset of FEC */
1104 fec_whack_reset(fecp);
1105
1106 /* point rx_skbuff, tx_skbuff */
1107 fep->rx_skbuff = (struct sk_buff **)&fep[1];
1108 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
1109
1110 fep->fecp = fecp;
1111 fep->fpi = fpi;
1112
1113 /* init locks */
1114 spin_lock_init(&fep->lock);
1115 spin_lock_init(&fep->tx_lock);
1116
1117 /*
1118 * Set the Ethernet address.
1119 */
1120 for (i = 0; i < 6; i++)
1121 dev->dev_addr[i] = fpi->macaddr[i];
1122
1123 fep->ring_base = dma_alloc_coherent(NULL,
1124 (fpi->tx_ring + fpi->rx_ring) *
1125 sizeof(cbd_t), &fep->ring_mem_addr,
1126 GFP_KERNEL);
1127 if (fep->ring_base == NULL) {
1128 printk(KERN_ERR DRV_MODULE_NAME
1129 ": %s dma alloc failed.\n", dev->name);
1130 err = -ENOMEM;
1131 goto err;
1132 }
1133
1134 /*
1135 * Set receive and transmit descriptor base.
1136 */
1137 fep->rx_bd_base = fep->ring_base;
1138 fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
1139
1140 /* initialize ring size variables */
1141 fep->tx_ring = fpi->tx_ring;
1142 fep->rx_ring = fpi->rx_ring;
1143
1144 /* SIU interrupt */
1145 if (fpi->phy_irq != -1 &&
1146 (fpi->phy_irq >= SIU_IRQ0 && fpi->phy_irq < SIU_LEVEL7)) {
1147
1148 siel = in_be32(&immap->im_siu_conf.sc_siel);
1149 if ((fpi->phy_irq & 1) == 0)
1150 siel |= (0x80000000 >> fpi->phy_irq);
1151 else
1152 siel &= ~(0x80000000 >> (fpi->phy_irq & ~1));
1153 out_be32(&immap->im_siu_conf.sc_siel, siel);
1154 }
1155
1156 /*
1157 * The FEC Ethernet specific entries in the device structure.
1158 */
1159 dev->open = fec_enet_open;
1160 dev->hard_start_xmit = fec_enet_start_xmit;
1161 dev->tx_timeout = fec_timeout;
1162 dev->watchdog_timeo = TX_TIMEOUT;
1163 dev->stop = fec_enet_close;
1164 dev->get_stats = fec_enet_get_stats;
1165 dev->set_multicast_list = fec_set_multicast_list;
1166 dev->set_mac_address = fec_set_mac_address;
1167 netif_napi_add(dev, &fec->napi,
1168 fec_enet_poll, fpi->napi_weight);
1169
1170 dev->ethtool_ops = &fec_ethtool_ops;
1171 dev->do_ioctl = fec_ioctl;
1172
1173 fep->fec_phy_speed =
1174 ((((fpi->sys_clk + 4999999) / 2500000) / 2) & 0x3F) << 1;
1175
1176 init_timer(&fep->phy_timer_list);
1177
1178 /* partial reset of FEC so that only MII works */
1179 FW(fecp, mii_speed, fep->fec_phy_speed);
1180 FW(fecp, ievent, 0xffc0);
1181 FW(fecp, ivec, (fpi->fec_irq / 2) << 29);
1182 FW(fecp, imask, 0);
1183 FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
1184 FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
1185
1186 netif_carrier_off(dev);
1187
1188 err = register_netdev(dev);
1189 if (err != 0)
1190 goto err;
1191 registered = 1;
1192
1193 if (fpi->use_mdio) {
1194 fep->mii_if.dev = dev;
1195 fep->mii_if.mdio_read = fec_mii_read;
1196 fep->mii_if.mdio_write = fec_mii_write;
1197 fep->mii_if.phy_id_mask = 0x1f;
1198 fep->mii_if.reg_num_mask = 0x1f;
1199 fep->mii_if.phy_id = fec_mii_phy_id_detect(dev);
1200 }
1201
1202 *devp = dev;
1203
1204 return 0;
1205
1206 err:
1207 if (dev != NULL) {
1208 if (fecp != NULL)
1209 fec_whack_reset(fecp);
1210
1211 if (registered)
1212 unregister_netdev(dev);
1213
1214 if (fep != NULL) {
1215 if (fep->ring_base)
1216 dma_free_coherent(NULL,
1217 (fpi->tx_ring +
1218 fpi->rx_ring) *
1219 sizeof(cbd_t), fep->ring_base,
1220 fep->ring_mem_addr);
1221 }
1222 free_netdev(dev);
1223 }
1224 return err;
1225}
1226
/*
 * fec_8xx_cleanup_one - undo fec_8xx_init_one() for one device.
 *
 * Resets the hardware (stopping DMA/interrupt activity), unregisters
 * the net_device, releases the descriptor-ring DMA memory and frees
 * the device.  Always returns 0.
 */
int fec_8xx_cleanup_one(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	fec_t *fecp = fep->fecp;
	const struct fec_platform_info *fpi = fep->fpi;

	/* quiesce the controller before tearing anything down */
	fec_whack_reset(fecp);

	unregister_netdev(dev);

	dma_free_coherent(NULL, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
			  fep->ring_base, fep->ring_mem_addr);

	free_netdev(dev);

	return 0;
}
1244
1245/**************************************************************************************/
1246/**************************************************************************************/
1247/**************************************************************************************/
1248
/* Module entry: delegate device discovery to the platform binding code. */
static int __init fec_8xx_init(void)
{
	return fec_8xx_platform_init();
}
1253
/* Module exit: platform binding code tears down all registered devices. */
static void __exit fec_8xx_cleanup(void)
{
	fec_8xx_platform_cleanup();
}
1258
1259/**************************************************************************************/
1260/**************************************************************************************/
1261/**************************************************************************************/
1262
/* Wire the init/cleanup routines above in as the module entry points. */
module_init(fec_8xx_init);
module_exit(fec_8xx_cleanup);
diff --git a/drivers/net/fec_8xx/fec_mii.c b/drivers/net/fec_8xx/fec_mii.c
deleted file mode 100644
index 3b6ca29d31f2..000000000000
--- a/drivers/net/fec_8xx/fec_mii.c
+++ /dev/null
@@ -1,418 +0,0 @@
1/*
2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
8 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
9 *
10 * Released under the GPL
11 */
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/string.h>
17#include <linux/ptrace.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/interrupt.h>
22#include <linux/init.h>
23#include <linux/delay.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/skbuff.h>
27#include <linux/spinlock.h>
28#include <linux/mii.h>
29#include <linux/ethtool.h>
30#include <linux/bitops.h>
31
32#include <asm/8xx_immap.h>
33#include <asm/pgtable.h>
34#include <asm/mpc8xx.h>
35#include <asm/irq.h>
36#include <asm/uaccess.h>
37#include <asm/cpm1.h>
38
39/*************************************************/
40
41#include "fec_8xx.h"
42
43/*************************************************/
44
/*
 * Build command words for the FEC's MII data register: the register
 * number goes in bits 18-22 and write data in the low 16 bits (the
 * caller ORs the PHY address in at bit 23 -- see fec_mii_read/write).
 *
 * FIX: the REG/VAL arguments are now fully parenthesized so the macros
 * stay correct for expression arguments such as "reg + 1" (the old
 * forms expanded them unparenthesized into & and << contexts).
 */
#define mk_mii_read(REG) (0x60020000 | (((REG) & 0x1f) << 18))
#define mk_mii_write(REG, VAL) (0x50020000 | (((REG) & 0x1f) << 18) | \
				((VAL) & 0xffff))
#define mk_mii_end 0
50
51/*************************************************/
52
/* XXX both FECs use the MII interface of FEC1 */
static DEFINE_SPINLOCK(fec_mii_lock);

/* upper bound on completion polls in fec_mii_read()/fec_mii_write() */
#define FEC_MII_LOOPS 10000
57
/*
 * fec_mii_read - read one MII register via FEC1's MDIO interface.
 * @dev:      owning net device (supplies the MII clock divider)
 * @phy_id:   PHY address (0-31), placed at bit 23 of the command
 * @location: register number (0-31)
 *
 * Returns the 16-bit register value, or -1 if the transaction did not
 * complete within FEC_MII_LOOPS polls.  Serialized by fec_mii_lock
 * because both FECs share FEC1's MII pins.
 */
int fec_mii_read(struct net_device *dev, int phy_id, int location)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	fec_t *fecp;
	int i, ret = -1;
	unsigned long flags;

	/* XXX MII interface is only connected to FEC1 */
	fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec;

	spin_lock_irqsave(&fec_mii_lock, flags);

	/* if FEC1 is not up yet, enable just enough of it for MDIO */
	if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) {
		FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);	/* MII enable */
		FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
		FW(fecp, ievent, FEC_ENET_MII);
	}

	/* Add PHY address to register command. */
	FW(fecp, mii_speed, fep->fec_phy_speed);
	FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location));

	/* busy-wait for the MII completion event */
	for (i = 0; i < FEC_MII_LOOPS; i++)
		if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
			break;

	if (i < FEC_MII_LOOPS) {
		FW(fecp, ievent, FEC_ENET_MII);	/* ack the event */
		ret = FR(fecp, mii_data) & 0xffff;
	}

	spin_unlock_irqrestore(&fec_mii_lock, flags);

	return ret;
}
93
/*
 * fec_mii_write - write one 16-bit MII register via FEC1's MDIO pins.
 *
 * Mirrors fec_mii_read(); a completion timeout is silently ignored
 * since this interface returns void.  Serialized by fec_mii_lock.
 */
void fec_mii_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	fec_t *fecp;
	unsigned long flags;
	int i;

	/* XXX MII interface is only connected to FEC1 */
	fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec;

	spin_lock_irqsave(&fec_mii_lock, flags);

	/* if FEC1 is not up yet, enable just enough of it for MDIO */
	if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) {
		FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);	/* MII enable */
		FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
		FW(fecp, ievent, FEC_ENET_MII);
	}

	/* Add PHY address to register command. */
	FW(fecp, mii_speed, fep->fec_phy_speed);	/* always adapt mii speed */
	FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value));

	/* busy-wait for the MII completion event */
	for (i = 0; i < FEC_MII_LOOPS; i++)
		if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
			break;

	if (i < FEC_MII_LOOPS)
		FW(fecp, ievent, FEC_ENET_MII);	/* ack the event */

	spin_unlock_irqrestore(&fec_mii_lock, flags);
}
125
126/*************************************************/
127
128#ifdef CONFIG_FEC_8XX_GENERIC_PHY
129
130/*
131 * Generic PHY support.
132 * Should work for all PHYs, but link change is detected by polling
133 */
134
/*
 * Poll-based link supervision for PHYs without an interrupt line:
 * re-arm the timer for another 500ms and re-check the link state.
 */
static void generic_timer_callback(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct fec_enet_private *fep = netdev_priv(dev);

	fep->phy_timer_list.expires = jiffies + HZ / 2;

	add_timer(&fep->phy_timer_list);

	fec_mii_link_status_change_check(dev, 0);
}
146
/* Start the 500ms link-polling timer for the generic (no-IRQ) PHY. */
static void generic_startup(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	fep->phy_timer_list.expires = jiffies + HZ / 2;	/* every 500ms */
	fep->phy_timer_list.data = (unsigned long)dev;
	fep->phy_timer_list.function = generic_timer_callback;
	add_timer(&fep->phy_timer_list);
}
156
/* Stop link polling; del_timer_sync() also waits out a running callback. */
static void generic_shutdown(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	del_timer_sync(&fep->phy_timer_list);
}
163
164#endif
165
#ifdef CONFIG_FEC_8XX_DM9161_PHY

/* ------------------------------------------------------------------------- */
/* The Davicom DM9161 is used on the NETTA board			     */

/* register definitions (vendor-specific MII registers 16-23) */

#define MII_DM9161_ACR		16	/* Aux. Config Register		*/
#define MII_DM9161_ACSR	17	/* Aux. Config/Status Register	*/
#define MII_DM9161_10TCSR	18	/* 10BaseT Config/Status Reg.	*/
#define MII_DM9161_INTR	21	/* Interrupt Register		*/
#define MII_DM9161_RECR	22	/* Receive Error Counter Reg.	*/
#define MII_DM9161_DISCR	23	/* Disconnect Counter Register	*/
179
180static void dm9161_startup(struct net_device *dev)
181{
182 struct fec_enet_private *fep = netdev_priv(dev);
183
184 fec_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000);
185}
186
187static void dm9161_ack_int(struct net_device *dev)
188{
189 struct fec_enet_private *fep = netdev_priv(dev);
190
191 fec_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR);
192}
193
194static void dm9161_shutdown(struct net_device *dev)
195{
196 struct fec_enet_private *fep = netdev_priv(dev);
197
198 fec_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00);
199}
200
201#endif
202
#ifdef CONFIG_FEC_8XX_LXT971_PHY

/* Support for LXT971/972 PHY (vendor-specific MII registers 16-30) */

#define MII_LXT971_PCR		16	/* Port Control Register	*/
#define MII_LXT971_SR2		17	/* Status Register 2		*/
#define MII_LXT971_IER		18	/* Interrupt Enable Register	*/
#define MII_LXT971_ISR		19	/* Interrupt Status Register	*/
#define MII_LXT971_LCR		20	/* LED Control Register		*/
#define MII_LXT971_TCR		30	/* Transmit Control Register	*/
213
214static void lxt971_startup(struct net_device *dev)
215{
216 struct fec_enet_private *fep = netdev_priv(dev);
217
218 fec_mii_write(dev, fep->mii_if.phy_id, MII_LXT971_IER, 0x00F2);
219}
220
221static void lxt971_ack_int(struct net_device *dev)
222{
223 struct fec_enet_private *fep = netdev_priv(dev);
224
225 fec_mii_read(dev, fep->mii_if.phy_id, MII_LXT971_ISR);
226}
227
228static void lxt971_shutdown(struct net_device *dev)
229{
230 struct fec_enet_private *fep = netdev_priv(dev);
231
232 fec_mii_write(dev, fep->mii_if.phy_id, MII_LXT971_IER, 0x0000);
233}
234#endif
235
236/**********************************************************************************/
237
/*
 * Table of supported PHYs.  fec_mii_phy_id_detect() matches each
 * entry's .id against the hardware id shifted right by 4 (dropping the
 * revision nibble); an entry with .id == 0 matches anything, so the
 * GENERIC entry — which is last — acts as a catch-all when enabled.
 * .ack_int may be absent (GENERIC has none); callers must check.
 */
static const struct phy_info phy_info[] = {
#ifdef CONFIG_FEC_8XX_DM9161_PHY
	{
		.id = 0x00181b88,	/* Davicom DM9161 OUI/model */
		.name = "DM9161",
		.startup = dm9161_startup,
		.ack_int = dm9161_ack_int,
		.shutdown = dm9161_shutdown,
	},
#endif
#ifdef CONFIG_FEC_8XX_LXT971_PHY
	{
		.id = 0x0001378e,	/* Intel LXT971/972 OUI/model */
		.name = "LXT971/972",
		.startup = lxt971_startup,
		.ack_int = lxt971_ack_int,
		.shutdown = lxt971_shutdown,
	},
#endif
#ifdef CONFIG_FEC_8XX_GENERIC_PHY
	{
		.id = 0,		/* wildcard: matches any PHY id */
		.name = "GENERIC",
		.startup = generic_startup,
		.shutdown = generic_shutdown,
	},
#endif
};
266
267/**********************************************************************************/
268
269int fec_mii_phy_id_detect(struct net_device *dev)
270{
271 struct fec_enet_private *fep = netdev_priv(dev);
272 const struct fec_platform_info *fpi = fep->fpi;
273 int i, r, start, end, phytype, physubtype;
274 const struct phy_info *phy;
275 int phy_hwid, phy_id;
276
277 /* if no MDIO */
278 if (fpi->use_mdio == 0)
279 return -1;
280
281 phy_hwid = -1;
282 fep->phy = NULL;
283
284 /* auto-detect? */
285 if (fpi->phy_addr == -1) {
286 start = 0;
287 end = 32;
288 } else { /* direct */
289 start = fpi->phy_addr;
290 end = start + 1;
291 }
292
293 for (phy_id = start; phy_id < end; phy_id++) {
294 r = fec_mii_read(dev, phy_id, MII_PHYSID1);
295 if (r == -1 || (phytype = (r & 0xffff)) == 0xffff)
296 continue;
297 r = fec_mii_read(dev, phy_id, MII_PHYSID2);
298 if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff)
299 continue;
300 phy_hwid = (phytype << 16) | physubtype;
301 if (phy_hwid != -1)
302 break;
303 }
304
305 if (phy_hwid == -1) {
306 printk(KERN_ERR DRV_MODULE_NAME
307 ": %s No PHY detected!\n", dev->name);
308 return -1;
309 }
310
311 for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++)
312 if (phy->id == (phy_hwid >> 4) || phy->id == 0)
313 break;
314
315 if (i >= ARRAY_SIZE(phy_info)) {
316 printk(KERN_ERR DRV_MODULE_NAME
317 ": %s PHY id 0x%08x is not supported!\n",
318 dev->name, phy_hwid);
319 return -1;
320 }
321
322 fep->phy = phy;
323
324 printk(KERN_INFO DRV_MODULE_NAME
325 ": %s Phy @ 0x%x, type %s (0x%08x)\n",
326 dev->name, phy_id, fep->phy->name, phy_hwid);
327
328 return phy_id;
329}
330
331void fec_mii_startup(struct net_device *dev)
332{
333 struct fec_enet_private *fep = netdev_priv(dev);
334 const struct fec_platform_info *fpi = fep->fpi;
335
336 if (!fpi->use_mdio || fep->phy == NULL)
337 return;
338
339 if (fep->phy->startup == NULL)
340 return;
341
342 (*fep->phy->startup) (dev);
343}
344
345void fec_mii_shutdown(struct net_device *dev)
346{
347 struct fec_enet_private *fep = netdev_priv(dev);
348 const struct fec_platform_info *fpi = fep->fpi;
349
350 if (!fpi->use_mdio || fep->phy == NULL)
351 return;
352
353 if (fep->phy->shutdown == NULL)
354 return;
355
356 (*fep->phy->shutdown) (dev);
357}
358
359void fec_mii_ack_int(struct net_device *dev)
360{
361 struct fec_enet_private *fep = netdev_priv(dev);
362 const struct fec_platform_info *fpi = fep->fpi;
363
364 if (!fpi->use_mdio || fep->phy == NULL)
365 return;
366
367 if (fep->phy->ack_int == NULL)
368 return;
369
370 (*fep->phy->ack_int) (dev);
371}
372
373/* helper function */
374static int mii_negotiated(struct mii_if_info *mii)
375{
376 int advert, lpa, val;
377
378 if (!mii_link_ok(mii))
379 return 0;
380
381 val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR);
382 if ((val & BMSR_ANEGCOMPLETE) == 0)
383 return 0;
384
385 advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE);
386 lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA);
387
388 return mii_nway_result(advert & lpa);
389}
390
391void fec_mii_link_status_change_check(struct net_device *dev, int init_media)
392{
393 struct fec_enet_private *fep = netdev_priv(dev);
394 unsigned int media;
395 unsigned long flags;
396
397 if (mii_check_media(&fep->mii_if, netif_msg_link(fep), init_media) == 0)
398 return;
399
400 media = mii_negotiated(&fep->mii_if);
401
402 if (netif_carrier_ok(dev)) {
403 spin_lock_irqsave(&fep->lock, flags);
404 fec_restart(dev, !!(media & ADVERTISE_FULL),
405 (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) ?
406 100 : 10);
407 spin_unlock_irqrestore(&fep->lock, flags);
408
409 netif_start_queue(dev);
410 } else {
411 netif_stop_queue(dev);
412
413 spin_lock_irqsave(&fep->lock, flags);
414 fec_stop(dev);
415 spin_unlock_irqrestore(&fep->lock, flags);
416
417 }
418}
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index a5baaf59ff66..352574a3f056 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -43,7 +43,7 @@
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44 44
45#ifdef CONFIG_PPC_CPM_NEW_BINDING 45#ifdef CONFIG_PPC_CPM_NEW_BINDING
46#include <asm/of_platform.h> 46#include <linux/of_platform.h>
47#endif 47#endif
48 48
49#include "fs_enet.h" 49#include "fs_enet.h"
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index d7ca31945c82..e3557eca7b6d 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -44,7 +44,7 @@
44#endif 44#endif
45 45
46#ifdef CONFIG_PPC_CPM_NEW_BINDING 46#ifdef CONFIG_PPC_CPM_NEW_BINDING
47#include <asm/of_platform.h> 47#include <linux/of_platform.h>
48#endif 48#endif
49 49
50#include "fs_enet.h" 50#include "fs_enet.h"
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index f0014cfbb275..8f6a43b0e0ff 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -37,7 +37,7 @@
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38 38
39#ifdef CONFIG_PPC_CPM_NEW_BINDING 39#ifdef CONFIG_PPC_CPM_NEW_BINDING
40#include <asm/of_platform.h> 40#include <linux/of_platform.h>
41#endif 41#endif
42 42
43#include "fs_enet.h" 43#include "fs_enet.h"
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index babc79ad490b..61af02b4c9d8 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -363,25 +363,31 @@ static int emac_reset(struct emac_instance *dev)
363 363
364static void emac_hash_mc(struct emac_instance *dev) 364static void emac_hash_mc(struct emac_instance *dev)
365{ 365{
366 struct emac_regs __iomem *p = dev->emacp; 366 const int regs = EMAC_XAHT_REGS(dev);
367 u16 gaht[4] = { 0 }; 367 u32 *gaht_base = emac_gaht_base(dev);
368 u32 gaht_temp[regs];
368 struct dev_mc_list *dmi; 369 struct dev_mc_list *dmi;
370 int i;
369 371
370 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count); 372 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
371 373
374 memset(gaht_temp, 0, sizeof (gaht_temp));
375
372 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) { 376 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
373 int bit; 377 int slot, reg, mask;
374 DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL, 378 DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
375 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2], 379 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
376 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]); 380 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
377 381
378 bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26); 382 slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
379 gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f); 383 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
384 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
385
386 gaht_temp[reg] |= mask;
380 } 387 }
381 out_be32(&p->gaht1, gaht[0]); 388
382 out_be32(&p->gaht2, gaht[1]); 389 for (i = 0; i < regs; i++)
383 out_be32(&p->gaht3, gaht[2]); 390 out_be32(gaht_base + i, gaht_temp[i]);
384 out_be32(&p->gaht4, gaht[3]);
385} 391}
386 392
387static inline u32 emac_iff2rmr(struct net_device *ndev) 393static inline u32 emac_iff2rmr(struct net_device *ndev)
@@ -398,7 +404,8 @@ static inline u32 emac_iff2rmr(struct net_device *ndev)
398 404
399 if (ndev->flags & IFF_PROMISC) 405 if (ndev->flags & IFF_PROMISC)
400 r |= EMAC_RMR_PME; 406 r |= EMAC_RMR_PME;
401 else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32) 407 else if (ndev->flags & IFF_ALLMULTI ||
408 (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
402 r |= EMAC_RMR_PMME; 409 r |= EMAC_RMR_PMME;
403 else if (ndev->mc_count > 0) 410 else if (ndev->mc_count > 0)
404 r |= EMAC_RMR_MAE; 411 r |= EMAC_RMR_MAE;
@@ -542,7 +549,7 @@ static int emac_configure(struct emac_instance *dev)
542 /* Put some arbitrary OUI, Manuf & Rev IDs so we can 549 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
543 * identify this GPCS PHY later. 550 * identify this GPCS PHY later.
544 */ 551 */
545 out_be32(&p->ipcr, 0xdeadbeef); 552 out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
546 } else 553 } else
547 mr1 |= EMAC_MR1_MF_1000; 554 mr1 |= EMAC_MR1_MF_1000;
548 555
@@ -2021,10 +2028,10 @@ static int emac_get_regs_len(struct emac_instance *dev)
2021{ 2028{
2022 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) 2029 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2023 return sizeof(struct emac_ethtool_regs_subhdr) + 2030 return sizeof(struct emac_ethtool_regs_subhdr) +
2024 EMAC4_ETHTOOL_REGS_SIZE; 2031 EMAC4_ETHTOOL_REGS_SIZE(dev);
2025 else 2032 else
2026 return sizeof(struct emac_ethtool_regs_subhdr) + 2033 return sizeof(struct emac_ethtool_regs_subhdr) +
2027 EMAC_ETHTOOL_REGS_SIZE; 2034 EMAC_ETHTOOL_REGS_SIZE(dev);
2028} 2035}
2029 2036
2030static int emac_ethtool_get_regs_len(struct net_device *ndev) 2037static int emac_ethtool_get_regs_len(struct net_device *ndev)
@@ -2051,12 +2058,12 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2051 hdr->index = dev->cell_index; 2058 hdr->index = dev->cell_index;
2052 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { 2059 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2053 hdr->version = EMAC4_ETHTOOL_REGS_VER; 2060 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2054 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE); 2061 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2055 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE); 2062 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
2056 } else { 2063 } else {
2057 hdr->version = EMAC_ETHTOOL_REGS_VER; 2064 hdr->version = EMAC_ETHTOOL_REGS_VER;
2058 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE); 2065 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2059 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE); 2066 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
2060 } 2067 }
2061} 2068}
2062 2069
@@ -2546,7 +2553,9 @@ static int __devinit emac_init_config(struct emac_instance *dev)
2546 } 2553 }
2547 2554
2548 /* Check EMAC version */ 2555 /* Check EMAC version */
2549 if (of_device_is_compatible(np, "ibm,emac4")) { 2556 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2557 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2558 } else if (of_device_is_compatible(np, "ibm,emac4")) {
2550 dev->features |= EMAC_FTR_EMAC4; 2559 dev->features |= EMAC_FTR_EMAC4;
2551 if (of_device_is_compatible(np, "ibm,emac-440gx")) 2560 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2552 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX; 2561 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
@@ -2607,6 +2616,15 @@ static int __devinit emac_init_config(struct emac_instance *dev)
2607 } 2616 }
2608 memcpy(dev->ndev->dev_addr, p, 6); 2617 memcpy(dev->ndev->dev_addr, p, 6);
2609 2618
2619 /* IAHT and GAHT filter parameterization */
2620 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2621 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2622 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2623 } else {
2624 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2625 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2626 }
2627
2610 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE); 2628 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2611 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige); 2629 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2612 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige); 2630 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
@@ -2678,7 +2696,8 @@ static int __devinit emac_probe(struct of_device *ofdev,
2678 goto err_irq_unmap; 2696 goto err_irq_unmap;
2679 } 2697 }
2680 // TODO : request_mem_region 2698 // TODO : request_mem_region
2681 dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs)); 2699 dev->emacp = ioremap(dev->rsrc_regs.start,
2700 dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
2682 if (dev->emacp == NULL) { 2701 if (dev->emacp == NULL) {
2683 printk(KERN_ERR "%s: Can't map device registers!\n", 2702 printk(KERN_ERR "%s: Can't map device registers!\n",
2684 np->full_name); 2703 np->full_name);
@@ -2892,6 +2911,10 @@ static struct of_device_id emac_match[] =
2892 .type = "network", 2911 .type = "network",
2893 .compatible = "ibm,emac4", 2912 .compatible = "ibm,emac4",
2894 }, 2913 },
2914 {
2915 .type = "network",
2916 .compatible = "ibm,emac4sync",
2917 },
2895 {}, 2918 {},
2896}; 2919};
2897 2920
diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h
index 1683db9870a4..6545e69d12c3 100644
--- a/drivers/net/ibm_newemac/core.h
+++ b/drivers/net/ibm_newemac/core.h
@@ -33,8 +33,8 @@
33#include <linux/netdevice.h> 33#include <linux/netdevice.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35#include <linux/spinlock.h> 35#include <linux/spinlock.h>
36#include <linux/of_platform.h>
36 37
37#include <asm/of_platform.h>
38#include <asm/io.h> 38#include <asm/io.h>
39#include <asm/dcr.h> 39#include <asm/dcr.h>
40 40
@@ -235,6 +235,10 @@ struct emac_instance {
235 u32 fifo_entry_size; 235 u32 fifo_entry_size;
236 u32 mal_burst_size; /* move to MAL ? */ 236 u32 mal_burst_size; /* move to MAL ? */
237 237
238 /* IAHT and GAHT filter parameterization */
239 u32 xaht_slots_shift;
240 u32 xaht_width_shift;
241
238 /* Descriptor management 242 /* Descriptor management
239 */ 243 */
240 struct mal_descriptor *tx_desc; 244 struct mal_descriptor *tx_desc;
@@ -309,6 +313,10 @@ struct emac_instance {
309 * Set if we need phy clock workaround for 440ep or 440gr 313 * Set if we need phy clock workaround for 440ep or 440gr
310 */ 314 */
311#define EMAC_FTR_440EP_PHY_CLK_FIX 0x00000100 315#define EMAC_FTR_440EP_PHY_CLK_FIX 0x00000100
316/*
317 * The 405EX and 460EX contain the EMAC4SYNC core
318 */
319#define EMAC_FTR_EMAC4SYNC 0x00000200
312 320
313 321
314/* Right now, we don't quite handle the always/possible masks on the 322/* Right now, we don't quite handle the always/possible masks on the
@@ -320,7 +328,8 @@ enum {
320 328
321 EMAC_FTRS_POSSIBLE = 329 EMAC_FTRS_POSSIBLE =
322#ifdef CONFIG_IBM_NEW_EMAC_EMAC4 330#ifdef CONFIG_IBM_NEW_EMAC_EMAC4
323 EMAC_FTR_EMAC4 | EMAC_FTR_HAS_NEW_STACR | 331 EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC |
332 EMAC_FTR_HAS_NEW_STACR |
324 EMAC_FTR_STACR_OC_INVERT | EMAC_FTR_440GX_PHY_CLK_FIX | 333 EMAC_FTR_STACR_OC_INVERT | EMAC_FTR_440GX_PHY_CLK_FIX |
325#endif 334#endif
326#ifdef CONFIG_IBM_NEW_EMAC_TAH 335#ifdef CONFIG_IBM_NEW_EMAC_TAH
@@ -342,6 +351,71 @@ static inline int emac_has_feature(struct emac_instance *dev,
342 (EMAC_FTRS_POSSIBLE & dev->features & feature); 351 (EMAC_FTRS_POSSIBLE & dev->features & feature);
343} 352}
344 353
354/*
355 * Various instances of the EMAC core have varying 1) number of
356 * address match slots, 2) width of the registers for handling address
357 * match slots, 3) number of registers for handling address match
358 * slots and 4) base offset for those registers.
359 *
360 * These macros and inlines handle these differences based on
361 * parameters supplied by the device structure which are, in turn,
362 * initialized based on the "compatible" entry in the device tree.
363 */
364
365#define EMAC4_XAHT_SLOTS_SHIFT 6
366#define EMAC4_XAHT_WIDTH_SHIFT 4
367
368#define EMAC4SYNC_XAHT_SLOTS_SHIFT 8
369#define EMAC4SYNC_XAHT_WIDTH_SHIFT 5
370
371#define EMAC_XAHT_SLOTS(dev) (1 << (dev)->xaht_slots_shift)
372#define EMAC_XAHT_WIDTH(dev) (1 << (dev)->xaht_width_shift)
373#define EMAC_XAHT_REGS(dev) (1 << ((dev)->xaht_slots_shift - \
374 (dev)->xaht_width_shift))
375
376#define EMAC_XAHT_CRC_TO_SLOT(dev, crc) \
377 ((EMAC_XAHT_SLOTS(dev) - 1) - \
378 ((crc) >> ((sizeof (u32) * BITS_PER_BYTE) - \
379 (dev)->xaht_slots_shift)))
380
381#define EMAC_XAHT_SLOT_TO_REG(dev, slot) \
382 ((slot) >> (dev)->xaht_width_shift)
383
384#define EMAC_XAHT_SLOT_TO_MASK(dev, slot) \
385 ((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >> \
386 ((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1)))
387
388static inline u32 *emac_xaht_base(struct emac_instance *dev)
389{
390 struct emac_regs __iomem *p = dev->emacp;
391 int offset;
392
393 /* The first IAHT entry always is the base of the block of
394 * IAHT and GAHT registers.
395 */
396 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC))
397 offset = offsetof(struct emac_regs, u1.emac4sync.iaht1);
398 else
399 offset = offsetof(struct emac_regs, u0.emac4.iaht1);
400
401 return ((u32 *)((ptrdiff_t)p + offset));
402}
403
404static inline u32 *emac_gaht_base(struct emac_instance *dev)
405{
406 /* GAHT registers always come after an identical number of
407 * IAHT registers.
408 */
409 return (emac_xaht_base(dev) + EMAC_XAHT_REGS(dev));
410}
411
412static inline u32 *emac_iaht_base(struct emac_instance *dev)
413{
414 /* IAHT registers always come before an identical number of
415 * GAHT registers.
416 */
417 return (emac_xaht_base(dev));
418}
345 419
346/* Ethtool get_regs complex data. 420/* Ethtool get_regs complex data.
347 * We want to get not just EMAC registers, but also MAL, ZMII, RGMII, TAH 421 * We want to get not just EMAC registers, but also MAL, ZMII, RGMII, TAH
@@ -366,4 +440,11 @@ struct emac_ethtool_regs_subhdr {
366 u32 index; 440 u32 index;
367}; 441};
368 442
443#define EMAC_ETHTOOL_REGS_VER 0
444#define EMAC_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
445 (dev)->rsrc_regs.start + 1)
446#define EMAC4_ETHTOOL_REGS_VER 1
447#define EMAC4_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
448 (dev)->rsrc_regs.start + 1)
449
369#endif /* __IBM_NEWEMAC_CORE_H */ 450#endif /* __IBM_NEWEMAC_CORE_H */
diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c
index 86b756a30784..775c850a425a 100644
--- a/drivers/net/ibm_newemac/debug.c
+++ b/drivers/net/ibm_newemac/debug.c
@@ -67,29 +67,55 @@ static void emac_desc_dump(struct emac_instance *p)
67static void emac_mac_dump(struct emac_instance *dev) 67static void emac_mac_dump(struct emac_instance *dev)
68{ 68{
69 struct emac_regs __iomem *p = dev->emacp; 69 struct emac_regs __iomem *p = dev->emacp;
70 const int xaht_regs = EMAC_XAHT_REGS(dev);
71 u32 *gaht_base = emac_gaht_base(dev);
72 u32 *iaht_base = emac_iaht_base(dev);
73 int emac4sync = emac_has_feature(dev, EMAC_FTR_EMAC4SYNC);
74 int n;
70 75
71 printk("** EMAC %s registers **\n" 76 printk("** EMAC %s registers **\n"
72 "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" 77 "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
73 "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n" 78 "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n"
74 "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n" 79 "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n",
75 "IAHT: 0x%04x 0x%04x 0x%04x 0x%04x "
76 "GAHT: 0x%04x 0x%04x 0x%04x 0x%04x\n"
77 "LSA = %04x%08x IPGVR = 0x%04x\n"
78 "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n"
79 "OCTX = 0x%08x OCRX = 0x%08x IPCR = 0x%08x\n",
80 dev->ofdev->node->full_name, in_be32(&p->mr0), in_be32(&p->mr1), 80 dev->ofdev->node->full_name, in_be32(&p->mr0), in_be32(&p->mr1),
81 in_be32(&p->tmr0), in_be32(&p->tmr1), 81 in_be32(&p->tmr0), in_be32(&p->tmr1),
82 in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser), 82 in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser),
83 in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid), 83 in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid),
84 in_be32(&p->vtci), 84 in_be32(&p->vtci)
85 in_be32(&p->iaht1), in_be32(&p->iaht2), in_be32(&p->iaht3), 85 );
86 in_be32(&p->iaht4), 86
87 in_be32(&p->gaht1), in_be32(&p->gaht2), in_be32(&p->gaht3), 87 if (emac4sync)
88 in_be32(&p->gaht4), 88 printk("MAR = %04x%08x MMAR = %04x%08x\n",
89 in_be32(&p->u0.emac4sync.mahr),
90 in_be32(&p->u0.emac4sync.malr),
91 in_be32(&p->u0.emac4sync.mmahr),
92 in_be32(&p->u0.emac4sync.mmalr)
93 );
94
95 for (n = 0; n < xaht_regs; n++)
96 printk("IAHT%02d = 0x%08x\n", n + 1, in_be32(iaht_base + n));
97
98 for (n = 0; n < xaht_regs; n++)
99 printk("GAHT%02d = 0x%08x\n", n + 1, in_be32(gaht_base + n));
100
101 printk("LSA = %04x%08x IPGVR = 0x%04x\n"
102 "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n"
103 "OCTX = 0x%08x OCRX = 0x%08x\n",
89 in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr), 104 in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr),
90 in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr), 105 in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr),
91 in_be32(&p->octx), in_be32(&p->ocrx), in_be32(&p->ipcr) 106 in_be32(&p->octx), in_be32(&p->ocrx)
92 ); 107 );
108
109 if (!emac4sync) {
110 printk("IPCR = 0x%08x\n",
111 in_be32(&p->u1.emac4.ipcr)
112 );
113 } else {
114 printk("REVID = 0x%08x TPC = 0x%08x\n",
115 in_be32(&p->u1.emac4sync.revid),
116 in_be32(&p->u1.emac4sync.tpc)
117 );
118 }
93 119
94 emac_desc_dump(dev); 120 emac_desc_dump(dev);
95} 121}
diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h
index 91cb096ab405..0afc2cf5c52b 100644
--- a/drivers/net/ibm_newemac/emac.h
+++ b/drivers/net/ibm_newemac/emac.h
@@ -27,37 +27,80 @@
27 27
28#include <linux/types.h> 28#include <linux/types.h>
29 29
30/* EMAC registers Write Access rules */ 30/* EMAC registers Write Access rules */
31struct emac_regs { 31struct emac_regs {
32 u32 mr0; /* special */ 32 /* Common registers across all EMAC implementations. */
33 u32 mr1; /* Reset */ 33 u32 mr0; /* Special */
34 u32 tmr0; /* special */ 34 u32 mr1; /* Reset */
35 u32 tmr1; /* special */ 35 u32 tmr0; /* Special */
36 u32 rmr; /* Reset */ 36 u32 tmr1; /* Special */
37 u32 isr; /* Always */ 37 u32 rmr; /* Reset */
38 u32 iser; /* Reset */ 38 u32 isr; /* Always */
39 u32 iahr; /* Reset, R, T */ 39 u32 iser; /* Reset */
40 u32 ialr; /* Reset, R, T */ 40 u32 iahr; /* Reset, R, T */
41 u32 vtpid; /* Reset, R, T */ 41 u32 ialr; /* Reset, R, T */
42 u32 vtci; /* Reset, R, T */ 42 u32 vtpid; /* Reset, R, T */
43 u32 ptr; /* Reset, T */ 43 u32 vtci; /* Reset, R, T */
44 u32 iaht1; /* Reset, R */ 44 u32 ptr; /* Reset, T */
45 u32 iaht2; /* Reset, R */ 45 union {
46 u32 iaht3; /* Reset, R */ 46 /* Registers unique to EMAC4 implementations */
47 u32 iaht4; /* Reset, R */ 47 struct {
48 u32 gaht1; /* Reset, R */ 48 u32 iaht1; /* Reset, R */
49 u32 gaht2; /* Reset, R */ 49 u32 iaht2; /* Reset, R */
50 u32 gaht3; /* Reset, R */ 50 u32 iaht3; /* Reset, R */
51 u32 gaht4; /* Reset, R */ 51 u32 iaht4; /* Reset, R */
52 u32 gaht1; /* Reset, R */
53 u32 gaht2; /* Reset, R */
54 u32 gaht3; /* Reset, R */
55 u32 gaht4; /* Reset, R */
56 } emac4;
57 /* Registers unique to EMAC4SYNC implementations */
58 struct {
59 u32 mahr; /* Reset, R, T */
60 u32 malr; /* Reset, R, T */
61 u32 mmahr; /* Reset, R, T */
62 u32 mmalr; /* Reset, R, T */
63 u32 rsvd0[4];
64 } emac4sync;
65 } u0;
66 /* Common registers across all EMAC implementations. */
52 u32 lsah; 67 u32 lsah;
53 u32 lsal; 68 u32 lsal;
54 u32 ipgvr; /* Reset, T */ 69 u32 ipgvr; /* Reset, T */
55 u32 stacr; /* special */ 70 u32 stacr; /* Special */
56 u32 trtr; /* special */ 71 u32 trtr; /* Special */
57 u32 rwmr; /* Reset */ 72 u32 rwmr; /* Reset */
58 u32 octx; 73 u32 octx;
59 u32 ocrx; 74 u32 ocrx;
60 u32 ipcr; 75 union {
76 /* Registers unique to EMAC4 implementations */
77 struct {
78 u32 ipcr;
79 } emac4;
80 /* Registers unique to EMAC4SYNC implementations */
81 struct {
82 u32 rsvd1;
83 u32 revid;
84 u32 rsvd2[2];
85 u32 iaht1; /* Reset, R */
86 u32 iaht2; /* Reset, R */
87 u32 iaht3; /* Reset, R */
88 u32 iaht4; /* Reset, R */
89 u32 iaht5; /* Reset, R */
90 u32 iaht6; /* Reset, R */
91 u32 iaht7; /* Reset, R */
92 u32 iaht8; /* Reset, R */
93 u32 gaht1; /* Reset, R */
94 u32 gaht2; /* Reset, R */
95 u32 gaht3; /* Reset, R */
96 u32 gaht4; /* Reset, R */
97 u32 gaht5; /* Reset, R */
98 u32 gaht6; /* Reset, R */
99 u32 gaht7; /* Reset, R */
100 u32 gaht8; /* Reset, R */
101 u32 tpc; /* Reset, T */
102 } emac4sync;
103 } u1;
61}; 104};
62 105
63/* 106/*
@@ -73,12 +116,6 @@ struct emac_regs {
73#define PHY_MODE_RTBI 7 116#define PHY_MODE_RTBI 7
74#define PHY_MODE_SGMII 8 117#define PHY_MODE_SGMII 8
75 118
76
77#define EMAC_ETHTOOL_REGS_VER 0
78#define EMAC_ETHTOOL_REGS_SIZE (sizeof(struct emac_regs) - sizeof(u32))
79#define EMAC4_ETHTOOL_REGS_VER 1
80#define EMAC4_ETHTOOL_REGS_SIZE sizeof(struct emac_regs)
81
82/* EMACx_MR0 */ 119/* EMACx_MR0 */
83#define EMAC_MR0_RXI 0x80000000 120#define EMAC_MR0_RXI 0x80000000
84#define EMAC_MR0_TXI 0x40000000 121#define EMAC_MR0_TXI 0x40000000
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index e32da3de2695..1d5379de6900 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -39,6 +39,7 @@
39#define RGMII_FER_RGMII(idx) (0x5 << ((idx) * 4)) 39#define RGMII_FER_RGMII(idx) (0x5 << ((idx) * 4))
40#define RGMII_FER_TBI(idx) (0x6 << ((idx) * 4)) 40#define RGMII_FER_TBI(idx) (0x6 << ((idx) * 4))
41#define RGMII_FER_GMII(idx) (0x7 << ((idx) * 4)) 41#define RGMII_FER_GMII(idx) (0x7 << ((idx) * 4))
42#define RGMII_FER_MII(idx) RGMII_FER_GMII(idx)
42 43
43/* RGMIIx_SSR */ 44/* RGMIIx_SSR */
44#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8)) 45#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8))
@@ -49,6 +50,7 @@
49static inline int rgmii_valid_mode(int phy_mode) 50static inline int rgmii_valid_mode(int phy_mode)
50{ 51{
51 return phy_mode == PHY_MODE_GMII || 52 return phy_mode == PHY_MODE_GMII ||
53 phy_mode == PHY_MODE_MII ||
52 phy_mode == PHY_MODE_RGMII || 54 phy_mode == PHY_MODE_RGMII ||
53 phy_mode == PHY_MODE_TBI || 55 phy_mode == PHY_MODE_TBI ||
54 phy_mode == PHY_MODE_RTBI; 56 phy_mode == PHY_MODE_RTBI;
@@ -63,6 +65,8 @@ static inline const char *rgmii_mode_name(int mode)
63 return "TBI"; 65 return "TBI";
64 case PHY_MODE_GMII: 66 case PHY_MODE_GMII:
65 return "GMII"; 67 return "GMII";
68 case PHY_MODE_MII:
69 return "MII";
66 case PHY_MODE_RTBI: 70 case PHY_MODE_RTBI:
67 return "RTBI"; 71 return "RTBI";
68 default: 72 default:
@@ -79,6 +83,8 @@ static inline u32 rgmii_mode_mask(int mode, int input)
79 return RGMII_FER_TBI(input); 83 return RGMII_FER_TBI(input);
80 case PHY_MODE_GMII: 84 case PHY_MODE_GMII:
81 return RGMII_FER_GMII(input); 85 return RGMII_FER_GMII(input);
86 case PHY_MODE_MII:
87 return RGMII_FER_MII(input);
82 case PHY_MODE_RTBI: 88 case PHY_MODE_RTBI:
83 return RGMII_FER_RTBI(input); 89 return RGMII_FER_RTBI(input);
84 default: 90 default:
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 665341e43055..387a13395015 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -585,8 +585,9 @@ static struct config_item_type netconsole_target_type = {
585 * Group operations and type for netconsole_subsys. 585 * Group operations and type for netconsole_subsys.
586 */ 586 */
587 587
588static struct config_item *make_netconsole_target(struct config_group *group, 588static int make_netconsole_target(struct config_group *group,
589 const char *name) 589 const char *name,
590 struct config_item **new_item)
590{ 591{
591 unsigned long flags; 592 unsigned long flags;
592 struct netconsole_target *nt; 593 struct netconsole_target *nt;
@@ -598,7 +599,7 @@ static struct config_item *make_netconsole_target(struct config_group *group,
598 nt = kzalloc(sizeof(*nt), GFP_KERNEL); 599 nt = kzalloc(sizeof(*nt), GFP_KERNEL);
599 if (!nt) { 600 if (!nt) {
600 printk(KERN_ERR "netconsole: failed to allocate memory\n"); 601 printk(KERN_ERR "netconsole: failed to allocate memory\n");
601 return NULL; 602 return -ENOMEM;
602 } 603 }
603 604
604 nt->np.name = "netconsole"; 605 nt->np.name = "netconsole";
@@ -615,7 +616,8 @@ static struct config_item *make_netconsole_target(struct config_group *group,
615 list_add(&nt->list, &target_list); 616 list_add(&nt->list, &target_list);
616 spin_unlock_irqrestore(&target_list_lock, flags); 617 spin_unlock_irqrestore(&target_list_lock, flags);
617 618
618 return &nt->item; 619 *new_item = &nt->item;
620 return 0;
619} 621}
620 622
621static void drop_netconsole_target(struct config_group *group, 623static void drop_netconsole_target(struct config_group *group,
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index fb0b918e5ccb..402e81020fb8 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -28,8 +28,8 @@
28#include <linux/mii.h> 28#include <linux/mii.h>
29#include <linux/phy.h> 29#include <linux/phy.h>
30#include <linux/workqueue.h> 30#include <linux/workqueue.h>
31#include <linux/of_platform.h>
31 32
32#include <asm/of_platform.h>
33#include <asm/uaccess.h> 33#include <asm/uaccess.h>
34#include <asm/irq.h> 34#include <asm/irq.h>
35#include <asm/io.h> 35#include <asm/io.h>
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index 940474736922..6d9e7ad9fda9 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -36,8 +36,8 @@
36#include <linux/mii.h> 36#include <linux/mii.h>
37#include <linux/phy.h> 37#include <linux/phy.h>
38#include <linux/fsl_devices.h> 38#include <linux/fsl_devices.h>
39#include <linux/of_platform.h>
39 40
40#include <asm/of_platform.h>
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/irq.h> 42#include <asm/irq.h>
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 3dd537be87d8..b54e2ea8346b 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/drivers/net/wireless/libertas/if_sdio.c 2 * linux/drivers/net/wireless/libertas/if_sdio.c
3 * 3 *
4 * Copyright 2007 Pierre Ossman 4 * Copyright 2007-2008 Pierre Ossman
5 * 5 *
6 * Inspired by if_cs.c, Copyright 2007 Holger Schurig 6 * Inspired by if_cs.c, Copyright 2007 Holger Schurig
7 * 7 *
@@ -266,13 +266,10 @@ static int if_sdio_card_to_host(struct if_sdio_card *card)
266 266
267 /* 267 /*
268 * The transfer must be in one transaction or the firmware 268 * The transfer must be in one transaction or the firmware
269 * goes suicidal. 269 * goes suicidal. There's no way to guarantee that for all
270 * controllers, but we can at least try.
270 */ 271 */
271 chunk = size; 272 chunk = sdio_align_size(card->func, size);
272 if ((chunk > card->func->cur_blksize) || (chunk > 512)) {
273 chunk = (chunk + card->func->cur_blksize - 1) /
274 card->func->cur_blksize * card->func->cur_blksize;
275 }
276 273
277 ret = sdio_readsb(card->func, card->buffer, card->ioport, chunk); 274 ret = sdio_readsb(card->func, card->buffer, card->ioport, chunk);
278 if (ret) 275 if (ret)
@@ -696,13 +693,10 @@ static int if_sdio_host_to_card(struct lbs_private *priv,
696 693
697 /* 694 /*
698 * The transfer must be in one transaction or the firmware 695 * The transfer must be in one transaction or the firmware
699 * goes suicidal. 696 * goes suicidal. There's no way to guarantee that for all
697 * controllers, but we can at least try.
700 */ 698 */
701 size = nb + 4; 699 size = sdio_align_size(card->func, nb + 4);
702 if ((size > card->func->cur_blksize) || (size > 512)) {
703 size = (size + card->func->cur_blksize - 1) /
704 card->func->cur_blksize * card->func->cur_blksize;
705 }
706 700
707 packet = kzalloc(sizeof(struct if_sdio_packet) + size, 701 packet = kzalloc(sizeof(struct if_sdio_packet) + size,
708 GFP_ATOMIC); 702 GFP_ATOMIC);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 29681c4b700b..8a1d93a2bb81 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -48,16 +48,32 @@ void of_dev_put(struct of_device *dev)
48} 48}
49EXPORT_SYMBOL(of_dev_put); 49EXPORT_SYMBOL(of_dev_put);
50 50
51static ssize_t dev_show_devspec(struct device *dev, 51static ssize_t devspec_show(struct device *dev,
52 struct device_attribute *attr, char *buf) 52 struct device_attribute *attr, char *buf)
53{ 53{
54 struct of_device *ofdev; 54 struct of_device *ofdev;
55 55
56 ofdev = to_of_device(dev); 56 ofdev = to_of_device(dev);
57 return sprintf(buf, "%s", ofdev->node->full_name); 57 return sprintf(buf, "%s\n", ofdev->node->full_name);
58} 58}
59 59
60static DEVICE_ATTR(devspec, S_IRUGO, dev_show_devspec, NULL); 60static ssize_t modalias_show(struct device *dev,
61 struct device_attribute *attr, char *buf)
62{
63 struct of_device *ofdev = to_of_device(dev);
64 ssize_t len = 0;
65
66 len = of_device_get_modalias(ofdev, buf, PAGE_SIZE - 2);
67 buf[len] = '\n';
68 buf[len+1] = 0;
69 return len+1;
70}
71
72struct device_attribute of_platform_device_attrs[] = {
73 __ATTR_RO(devspec),
74 __ATTR_RO(modalias),
75 __ATTR_NULL
76};
61 77
62/** 78/**
63 * of_release_dev - free an of device structure when all users of it are finished. 79 * of_release_dev - free an of device structure when all users of it are finished.
@@ -78,25 +94,61 @@ EXPORT_SYMBOL(of_release_dev);
78 94
79int of_device_register(struct of_device *ofdev) 95int of_device_register(struct of_device *ofdev)
80{ 96{
81 int rc;
82
83 BUG_ON(ofdev->node == NULL); 97 BUG_ON(ofdev->node == NULL);
84 98 return device_register(&ofdev->dev);
85 rc = device_register(&ofdev->dev);
86 if (rc)
87 return rc;
88
89 rc = device_create_file(&ofdev->dev, &dev_attr_devspec);
90 if (rc)
91 device_unregister(&ofdev->dev);
92
93 return rc;
94} 99}
95EXPORT_SYMBOL(of_device_register); 100EXPORT_SYMBOL(of_device_register);
96 101
97void of_device_unregister(struct of_device *ofdev) 102void of_device_unregister(struct of_device *ofdev)
98{ 103{
99 device_remove_file(&ofdev->dev, &dev_attr_devspec);
100 device_unregister(&ofdev->dev); 104 device_unregister(&ofdev->dev);
101} 105}
102EXPORT_SYMBOL(of_device_unregister); 106EXPORT_SYMBOL(of_device_unregister);
107
108ssize_t of_device_get_modalias(struct of_device *ofdev,
109 char *str, ssize_t len)
110{
111 const char *compat;
112 int cplen, i;
113 ssize_t tsize, csize, repend;
114
115 /* Name & Type */
116 csize = snprintf(str, len, "of:N%sT%s",
117 ofdev->node->name, ofdev->node->type);
118
119 /* Get compatible property if any */
120 compat = of_get_property(ofdev->node, "compatible", &cplen);
121 if (!compat)
122 return csize;
123
124 /* Find true end (we tolerate multiple \0 at the end */
125 for (i = (cplen - 1); i >= 0 && !compat[i]; i--)
126 cplen--;
127 if (!cplen)
128 return csize;
129 cplen++;
130
131 /* Check space (need cplen+1 chars including final \0) */
132 tsize = csize + cplen;
133 repend = tsize;
134
135 if (csize >= len) /* @ the limit, all is already filled */
136 return tsize;
137
138 if (tsize >= len) { /* limit compat list */
139 cplen = len - csize - 1;
140 repend = len;
141 }
142
143 /* Copy and do char replacement */
144 memcpy(&str[csize + 1], compat, cplen);
145 for (i = csize; i < repend; i++) {
146 char c = str[i];
147 if (c == '\0')
148 str[i] = 'C';
149 else if (c == ' ')
150 str[i] = '_';
151 }
152
153 return tsize;
154}
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index 000681e98f2c..1c9cab844f10 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -137,38 +137,6 @@ int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np,
137} 137}
138EXPORT_SYMBOL(of_gpio_simple_xlate); 138EXPORT_SYMBOL(of_gpio_simple_xlate);
139 139
140/* Should be sufficient for now, later we'll use dynamic bases. */
141#if defined(CONFIG_PPC32) || defined(CONFIG_SPARC32)
142#define GPIOS_PER_CHIP 32
143#else
144#define GPIOS_PER_CHIP 64
145#endif
146
147static int of_get_gpiochip_base(struct device_node *np)
148{
149 struct device_node *gc = NULL;
150 int gpiochip_base = 0;
151
152 while ((gc = of_find_all_nodes(gc))) {
153 if (!of_get_property(gc, "gpio-controller", NULL))
154 continue;
155
156 if (gc != np) {
157 gpiochip_base += GPIOS_PER_CHIP;
158 continue;
159 }
160
161 of_node_put(gc);
162
163 if (gpiochip_base >= ARCH_NR_GPIOS)
164 return -ENOSPC;
165
166 return gpiochip_base;
167 }
168
169 return -ENOENT;
170}
171
172/** 140/**
173 * of_mm_gpiochip_add - Add memory mapped GPIO chip (bank) 141 * of_mm_gpiochip_add - Add memory mapped GPIO chip (bank)
174 * @np: device node of the GPIO chip 142 * @np: device node of the GPIO chip
@@ -205,11 +173,7 @@ int of_mm_gpiochip_add(struct device_node *np,
205 if (!mm_gc->regs) 173 if (!mm_gc->regs)
206 goto err1; 174 goto err1;
207 175
208 gc->base = of_get_gpiochip_base(np); 176 gc->base = -1;
209 if (gc->base < 0) {
210 ret = gc->base;
211 goto err1;
212 }
213 177
214 if (!of_gc->xlate) 178 if (!of_gc->xlate)
215 of_gc->xlate = of_gpio_simple_xlate; 179 of_gc->xlate = of_gpio_simple_xlate;
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index b2ccdcbeb896..5c015d310d4a 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/i2c.h> 14#include <linux/i2c.h>
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/of_i2c.h>
16#include <linux/module.h> 17#include <linux/module.h>
17 18
18struct i2c_driver_device { 19struct i2c_driver_device {
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index ca09a63a64db..298de0f95d70 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -17,6 +17,8 @@
17#include <linux/of_device.h> 17#include <linux/of_device.h>
18#include <linux/of_platform.h> 18#include <linux/of_platform.h>
19 19
20extern struct device_attribute of_platform_device_attrs[];
21
20static int of_platform_bus_match(struct device *dev, struct device_driver *drv) 22static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
21{ 23{
22 struct of_device *of_dev = to_of_device(dev); 24 struct of_device *of_dev = to_of_device(dev);
@@ -103,6 +105,7 @@ int of_bus_type_init(struct bus_type *bus, const char *name)
103 bus->suspend = of_platform_device_suspend; 105 bus->suspend = of_platform_device_suspend;
104 bus->resume = of_platform_device_resume; 106 bus->resume = of_platform_device_resume;
105 bus->shutdown = of_platform_device_shutdown; 107 bus->shutdown = of_platform_device_shutdown;
108 bus->dev_attrs = of_platform_device_attrs;
106 return bus_register(bus); 109 return bus_register(bus);
107} 110}
108 111
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 4d1ce2e7361e..7d63f8ced24b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the PCI bus specific drivers. 2# Makefile for the PCI bus specific drivers.
3# 3#
4 4
5obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \ 5obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \
6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o 6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o
7obj-$(CONFIG_PROC_FS) += proc.o 7obj-$(CONFIG_PROC_FS) += proc.o
8 8
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index f8c187a763bd..93e37f0666ab 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -30,6 +30,7 @@
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/pci_hotplug.h> 32#include <linux/pci_hotplug.h>
33#include <linux/pci-acpi.h>
33#include <acpi/acpi.h> 34#include <acpi/acpi.h>
34#include <acpi/acpi_bus.h> 35#include <acpi/acpi_bus.h>
35#include <acpi/actypes.h> 36#include <acpi/actypes.h>
@@ -299,7 +300,7 @@ free_and_return:
299 * 300 *
300 * @handle - the handle of the hotplug controller. 301 * @handle - the handle of the hotplug controller.
301 */ 302 */
302acpi_status acpi_run_oshp(acpi_handle handle) 303static acpi_status acpi_run_oshp(acpi_handle handle)
303{ 304{
304 acpi_status status; 305 acpi_status status;
305 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; 306 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -322,9 +323,6 @@ acpi_status acpi_run_oshp(acpi_handle handle)
322 kfree(string.pointer); 323 kfree(string.pointer);
323 return status; 324 return status;
324} 325}
325EXPORT_SYMBOL_GPL(acpi_run_oshp);
326
327
328 326
329/* acpi_get_hp_params_from_firmware 327/* acpi_get_hp_params_from_firmware
330 * 328 *
@@ -374,6 +372,85 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
374} 372}
375EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware); 373EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware);
376 374
375/**
376 * acpi_get_hp_hw_control_from_firmware
377 * @dev: the pci_dev of the bridge that has a hotplug controller
378 * @flags: requested control bits for _OSC
379 *
380 * Attempt to take hotplug control from firmware.
381 */
382int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
383{
384 acpi_status status;
385 acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev));
386 struct pci_dev *pdev = dev;
387 struct pci_bus *parent;
388 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
389
390 flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
391 OSC_SHPC_NATIVE_HP_CONTROL |
392 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
393 if (!flags) {
394 err("Invalid flags %u specified!\n", flags);
395 return -EINVAL;
396 }
397
398 /*
399 * Per PCI firmware specification, we should run the ACPI _OSC
400 * method to get control of hotplug hardware before using it. If
401 * an _OSC is missing, we look for an OSHP to do the same thing.
402 * To handle different BIOS behavior, we look for _OSC and OSHP
403 * within the scope of the hotplug controller and its parents,
404 * upto the host bridge under which this controller exists.
405 */
406 while (!handle) {
407 /*
408 * This hotplug controller was not listed in the ACPI name
409 * space at all. Try to get acpi handle of parent pci bus.
410 */
411 if (!pdev || !pdev->bus->parent)
412 break;
413 parent = pdev->bus->parent;
414 dbg("Could not find %s in acpi namespace, trying parent\n",
415 pci_name(pdev));
416 if (!parent->self)
417 /* Parent must be a host bridge */
418 handle = acpi_get_pci_rootbridge_handle(
419 pci_domain_nr(parent),
420 parent->number);
421 else
422 handle = DEVICE_ACPI_HANDLE(&(parent->self->dev));
423 pdev = parent->self;
424 }
425
426 while (handle) {
427 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
428 dbg("Trying to get hotplug control for %s \n",
429 (char *)string.pointer);
430 status = pci_osc_control_set(handle, flags);
431 if (status == AE_NOT_FOUND)
432 status = acpi_run_oshp(handle);
433 if (ACPI_SUCCESS(status)) {
434 dbg("Gained control for hotplug HW for pci %s (%s)\n",
435 pci_name(dev), (char *)string.pointer);
436 kfree(string.pointer);
437 return 0;
438 }
439 if (acpi_root_bridge(handle))
440 break;
441 chandle = handle;
442 status = acpi_get_parent(chandle, &handle);
443 if (ACPI_FAILURE(status))
444 break;
445 }
446
447 dbg("Cannot get control of hotplug hardware for pci %s\n",
448 pci_name(dev));
449
450 kfree(string.pointer);
451 return -ENODEV;
452}
453EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
377 454
378/* acpi_root_bridge - check to see if this acpi object is a root bridge 455/* acpi_root_bridge - check to see if this acpi object is a root bridge
379 * 456 *
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 7a29164d4b32..eecf7cbf4139 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -215,7 +215,6 @@ extern u8 acpiphp_get_power_status (struct acpiphp_slot *slot);
215extern u8 acpiphp_get_attention_status (struct acpiphp_slot *slot); 215extern u8 acpiphp_get_attention_status (struct acpiphp_slot *slot);
216extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot); 216extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot);
217extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot); 217extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot);
218extern u32 acpiphp_get_address (struct acpiphp_slot *slot);
219 218
220/* variables */ 219/* variables */
221extern int acpiphp_debug; 220extern int acpiphp_debug;
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 7af68ba27903..0e496e866a84 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -70,7 +70,6 @@ static int disable_slot (struct hotplug_slot *slot);
70static int set_attention_status (struct hotplug_slot *slot, u8 value); 70static int set_attention_status (struct hotplug_slot *slot, u8 value);
71static int get_power_status (struct hotplug_slot *slot, u8 *value); 71static int get_power_status (struct hotplug_slot *slot, u8 *value);
72static int get_attention_status (struct hotplug_slot *slot, u8 *value); 72static int get_attention_status (struct hotplug_slot *slot, u8 *value);
73static int get_address (struct hotplug_slot *slot, u32 *value);
74static int get_latch_status (struct hotplug_slot *slot, u8 *value); 73static int get_latch_status (struct hotplug_slot *slot, u8 *value);
75static int get_adapter_status (struct hotplug_slot *slot, u8 *value); 74static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
76 75
@@ -83,7 +82,6 @@ static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
83 .get_attention_status = get_attention_status, 82 .get_attention_status = get_attention_status,
84 .get_latch_status = get_latch_status, 83 .get_latch_status = get_latch_status,
85 .get_adapter_status = get_adapter_status, 84 .get_adapter_status = get_adapter_status,
86 .get_address = get_address,
87}; 85};
88 86
89 87
@@ -274,23 +272,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
274 return 0; 272 return 0;
275} 273}
276 274
277
278/**
279 * get_address - get pci address of a slot
280 * @hotplug_slot: slot to get status
281 * @value: pointer to struct pci_busdev (seg, bus, dev)
282 */
283static int get_address(struct hotplug_slot *hotplug_slot, u32 *value)
284{
285 struct slot *slot = hotplug_slot->private;
286
287 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
288
289 *value = acpiphp_get_address(slot->acpi_slot);
290
291 return 0;
292}
293
294static int __init init_acpi(void) 275static int __init init_acpi(void)
295{ 276{
296 int retval; 277 int retval;
@@ -357,7 +338,11 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
357 acpiphp_slot->slot = slot; 338 acpiphp_slot->slot = slot;
358 snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun); 339 snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun);
359 340
360 retval = pci_hp_register(slot->hotplug_slot); 341 retval = pci_hp_register(slot->hotplug_slot,
342 acpiphp_slot->bridge->pci_bus,
343 acpiphp_slot->device);
344 if (retval == -EBUSY)
345 goto error_hpslot;
361 if (retval) { 346 if (retval) {
362 err("pci_hp_register failed with error %d\n", retval); 347 err("pci_hp_register failed with error %d\n", retval);
363 goto error_hpslot; 348 goto error_hpslot;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 91156f85a926..a3e4705dd8f0 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -258,7 +258,12 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
258 bridge->pci_bus->number, slot->device); 258 bridge->pci_bus->number, slot->device);
259 retval = acpiphp_register_hotplug_slot(slot); 259 retval = acpiphp_register_hotplug_slot(slot);
260 if (retval) { 260 if (retval) {
261 warn("acpiphp_register_hotplug_slot failed(err code = 0x%x)\n", retval); 261 if (retval == -EBUSY)
262 warn("Slot %d already registered by another "
263 "hotplug driver\n", slot->sun);
264 else
265 warn("acpiphp_register_hotplug_slot failed "
266 "(err code = 0x%x)\n", retval);
262 goto err_exit; 267 goto err_exit;
263 } 268 }
264 } 269 }
@@ -1878,19 +1883,3 @@ u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot)
1878 1883
1879 return (sta == 0) ? 0 : 1; 1884 return (sta == 0) ? 0 : 1;
1880} 1885}
1881
1882
1883/*
1884 * pci address (seg/bus/dev)
1885 */
1886u32 acpiphp_get_address(struct acpiphp_slot *slot)
1887{
1888 u32 address;
1889 struct pci_bus *pci_bus = slot->bridge->pci_bus;
1890
1891 address = (pci_domain_nr(pci_bus) << 16) |
1892 (pci_bus->number << 8) |
1893 slot->device;
1894
1895 return address;
1896}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index ede9051fdb5d..2b7c45e39370 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -33,8 +33,10 @@
33#include <linux/kobject.h> 33#include <linux/kobject.h>
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35#include <linux/moduleparam.h> 35#include <linux/moduleparam.h>
36#include <linux/pci.h>
36 37
37#include "acpiphp.h" 38#include "acpiphp.h"
39#include "../pci.h"
38 40
39#define DRIVER_VERSION "1.0.1" 41#define DRIVER_VERSION "1.0.1"
40#define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>" 42#define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>"
@@ -430,7 +432,7 @@ static int __init ibm_acpiphp_init(void)
430 int retval = 0; 432 int retval = 0;
431 acpi_status status; 433 acpi_status status;
432 struct acpi_device *device; 434 struct acpi_device *device;
433 struct kobject *sysdir = &pci_hotplug_slots_kset->kobj; 435 struct kobject *sysdir = &pci_slots_kset->kobj;
434 436
435 dbg("%s\n", __func__); 437 dbg("%s\n", __func__);
436 438
@@ -477,7 +479,7 @@ init_return:
477static void __exit ibm_acpiphp_exit(void) 479static void __exit ibm_acpiphp_exit(void)
478{ 480{
479 acpi_status status; 481 acpi_status status;
480 struct kobject *sysdir = &pci_hotplug_slots_kset->kobj; 482 struct kobject *sysdir = &pci_slots_kset->kobj;
481 483
482 dbg("%s\n", __func__); 484 dbg("%s\n", __func__);
483 485
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index d8a6b80ab42a..935947991dc9 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -285,7 +285,7 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
285 info->attention_status = cpci_get_attention_status(slot); 285 info->attention_status = cpci_get_attention_status(slot);
286 286
287 dbg("registering slot %s", slot->hotplug_slot->name); 287 dbg("registering slot %s", slot->hotplug_slot->name);
288 status = pci_hp_register(slot->hotplug_slot); 288 status = pci_hp_register(slot->hotplug_slot, bus, i);
289 if (status) { 289 if (status) {
290 err("pci_hp_register failed with error %d", status); 290 err("pci_hp_register failed with error %d", status);
291 goto error_name; 291 goto error_name;
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 36b115b27b0b..54defec51d08 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -434,7 +434,9 @@ static int ctrl_slot_setup(struct controller *ctrl,
434 slot->bus, slot->device, 434 slot->bus, slot->device,
435 slot->number, ctrl->slot_device_offset, 435 slot->number, ctrl->slot_device_offset,
436 slot_number); 436 slot_number);
437 result = pci_hp_register(hotplug_slot); 437 result = pci_hp_register(hotplug_slot,
438 ctrl->pci_dev->subordinate,
439 slot->device);
438 if (result) { 440 if (result) {
439 err("pci_hp_register failed with error %d\n", result); 441 err("pci_hp_register failed with error %d\n", result);
440 goto error_name; 442 goto error_name;
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index 7e9a827c2687..40337a06c18a 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -66,6 +66,7 @@ struct dummy_slot {
66 struct pci_dev *dev; 66 struct pci_dev *dev;
67 struct work_struct remove_work; 67 struct work_struct remove_work;
68 unsigned long removed; 68 unsigned long removed;
69 char name[8];
69}; 70};
70 71
71static int debug; 72static int debug;
@@ -100,6 +101,7 @@ static int add_slot(struct pci_dev *dev)
100 struct dummy_slot *dslot; 101 struct dummy_slot *dslot;
101 struct hotplug_slot *slot; 102 struct hotplug_slot *slot;
102 int retval = -ENOMEM; 103 int retval = -ENOMEM;
104 static int count = 1;
103 105
104 slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL); 106 slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
105 if (!slot) 107 if (!slot)
@@ -113,18 +115,18 @@ static int add_slot(struct pci_dev *dev)
113 slot->info->max_bus_speed = PCI_SPEED_UNKNOWN; 115 slot->info->max_bus_speed = PCI_SPEED_UNKNOWN;
114 slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; 116 slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
115 117
116 slot->name = &dev->dev.bus_id[0];
117 dbg("slot->name = %s\n", slot->name);
118
119 dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL); 118 dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL);
120 if (!dslot) 119 if (!dslot)
121 goto error_info; 120 goto error_info;
122 121
122 slot->name = dslot->name;
123 snprintf(slot->name, sizeof(dslot->name), "fake%d", count++);
124 dbg("slot->name = %s\n", slot->name);
123 slot->ops = &dummy_hotplug_slot_ops; 125 slot->ops = &dummy_hotplug_slot_ops;
124 slot->release = &dummy_release; 126 slot->release = &dummy_release;
125 slot->private = dslot; 127 slot->private = dslot;
126 128
127 retval = pci_hp_register(slot); 129 retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn));
128 if (retval) { 130 if (retval) {
129 err("pci_hp_register failed with error %d\n", retval); 131 err("pci_hp_register failed with error %d\n", retval);
130 goto error_dslot; 132 goto error_dslot;
@@ -148,17 +150,17 @@ error:
148static int __init pci_scan_buses(void) 150static int __init pci_scan_buses(void)
149{ 151{
150 struct pci_dev *dev = NULL; 152 struct pci_dev *dev = NULL;
151 int retval = 0; 153 int lastslot = 0;
152 154
153 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 155 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
154 retval = add_slot(dev); 156 if (PCI_FUNC(dev->devfn) > 0 &&
155 if (retval) { 157 lastslot == PCI_SLOT(dev->devfn))
156 pci_dev_put(dev); 158 continue;
157 break; 159 lastslot = PCI_SLOT(dev->devfn);
158 } 160 add_slot(dev);
159 } 161 }
160 162
161 return retval; 163 return 0;
162} 164}
163 165
164static void remove_slot(struct dummy_slot *dslot) 166static void remove_slot(struct dummy_slot *dslot)
@@ -296,23 +298,9 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
296 return 0; 298 return 0;
297} 299}
298 300
299/* find the hotplug_slot for the pci_dev */
300static struct hotplug_slot *get_slot_from_dev(struct pci_dev *dev)
301{
302 struct dummy_slot *dslot;
303
304 list_for_each_entry(dslot, &slot_list, node) {
305 if (dslot->dev == dev)
306 return dslot->slot;
307 }
308 return NULL;
309}
310
311
312static int disable_slot(struct hotplug_slot *slot) 301static int disable_slot(struct hotplug_slot *slot)
313{ 302{
314 struct dummy_slot *dslot; 303 struct dummy_slot *dslot;
315 struct hotplug_slot *hslot;
316 struct pci_dev *dev; 304 struct pci_dev *dev;
317 int func; 305 int func;
318 306
@@ -322,41 +310,27 @@ static int disable_slot(struct hotplug_slot *slot)
322 310
323 dbg("%s - physical_slot = %s\n", __func__, slot->name); 311 dbg("%s - physical_slot = %s\n", __func__, slot->name);
324 312
325 /* don't disable bridged devices just yet, we can't handle them easily... */ 313 for (func = 7; func >= 0; func--) {
326 if (dslot->dev->subordinate) { 314 dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func);
327 err("Can't remove PCI devices with other PCI devices behind it yet.\n"); 315 if (!dev)
328 return -ENODEV; 316 continue;
329 } 317
330 if (test_and_set_bit(0, &dslot->removed)) { 318 if (test_and_set_bit(0, &dslot->removed)) {
331 dbg("Slot already scheduled for removal\n"); 319 dbg("Slot already scheduled for removal\n");
332 return -ENODEV; 320 return -ENODEV;
333 }
334 /* search for subfunctions and disable them first */
335 if (!(dslot->dev->devfn & 7)) {
336 for (func = 1; func < 8; func++) {
337 dev = pci_get_slot(dslot->dev->bus,
338 dslot->dev->devfn + func);
339 if (dev) {
340 hslot = get_slot_from_dev(dev);
341 if (hslot)
342 disable_slot(hslot);
343 else {
344 err("Hotplug slot not found for subfunction of PCI device\n");
345 return -ENODEV;
346 }
347 pci_dev_put(dev);
348 } else
349 dbg("No device in slot found\n");
350 } 321 }
351 }
352 322
353 /* remove the device from the pci core */ 323 /* queue work item to blow away this sysfs entry and other
354 pci_remove_bus_device(dslot->dev); 324 * parts.
325 */
326 INIT_WORK(&dslot->remove_work, remove_slot_worker);
327 queue_work(dummyphp_wq, &dslot->remove_work);
355 328
356 /* queue work item to blow away this sysfs entry and other parts. */ 329 /* blow away this sysfs entry and other parts. */
357 INIT_WORK(&dslot->remove_work, remove_slot_worker); 330 remove_slot(dslot);
358 queue_work(dummyphp_wq, &dslot->remove_work);
359 331
332 pci_dev_put(dev);
333 }
360 return 0; 334 return 0;
361} 335}
362 336
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index dca7efc14be2..8467d0287325 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -1001,7 +1001,8 @@ static int __init ebda_rsrc_controller (void)
1001 tmp_slot = list_entry (list, struct slot, ibm_slot_list); 1001 tmp_slot = list_entry (list, struct slot, ibm_slot_list);
1002 1002
1003 snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot)); 1003 snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot));
1004 pci_hp_register (tmp_slot->hotplug_slot); 1004 pci_hp_register(tmp_slot->hotplug_slot,
1005 pci_find_bus(0, tmp_slot->bus), tmp_slot->device);
1005 } 1006 }
1006 1007
1007 print_ebda_hpc (); 1008 print_ebda_hpc ();
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index a11021e8ce37..5f85b1b120e3 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -40,6 +40,7 @@
40#include <linux/pci.h> 40#include <linux/pci.h>
41#include <linux/pci_hotplug.h> 41#include <linux/pci_hotplug.h>
42#include <asm/uaccess.h> 42#include <asm/uaccess.h>
43#include "../pci.h"
43 44
44#define MY_NAME "pci_hotplug" 45#define MY_NAME "pci_hotplug"
45 46
@@ -60,41 +61,7 @@ static int debug;
60////////////////////////////////////////////////////////////////// 61//////////////////////////////////////////////////////////////////
61 62
62static LIST_HEAD(pci_hotplug_slot_list); 63static LIST_HEAD(pci_hotplug_slot_list);
63 64static DEFINE_SPINLOCK(pci_hotplug_slot_list_lock);
64struct kset *pci_hotplug_slots_kset;
65
66static ssize_t hotplug_slot_attr_show(struct kobject *kobj,
67 struct attribute *attr, char *buf)
68{
69 struct hotplug_slot *slot = to_hotplug_slot(kobj);
70 struct hotplug_slot_attribute *attribute = to_hotplug_attr(attr);
71 return attribute->show ? attribute->show(slot, buf) : -EIO;
72}
73
74static ssize_t hotplug_slot_attr_store(struct kobject *kobj,
75 struct attribute *attr, const char *buf, size_t len)
76{
77 struct hotplug_slot *slot = to_hotplug_slot(kobj);
78 struct hotplug_slot_attribute *attribute = to_hotplug_attr(attr);
79 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
80}
81
82static struct sysfs_ops hotplug_slot_sysfs_ops = {
83 .show = hotplug_slot_attr_show,
84 .store = hotplug_slot_attr_store,
85};
86
87static void hotplug_slot_release(struct kobject *kobj)
88{
89 struct hotplug_slot *slot = to_hotplug_slot(kobj);
90 if (slot->release)
91 slot->release(slot);
92}
93
94static struct kobj_type hotplug_slot_ktype = {
95 .sysfs_ops = &hotplug_slot_sysfs_ops,
96 .release = &hotplug_slot_release,
97};
98 65
99/* these strings match up with the values in pci_bus_speed */ 66/* these strings match up with the values in pci_bus_speed */
100static char *pci_bus_speed_strings[] = { 67static char *pci_bus_speed_strings[] = {
@@ -149,16 +116,15 @@ GET_STATUS(power_status, u8)
149GET_STATUS(attention_status, u8) 116GET_STATUS(attention_status, u8)
150GET_STATUS(latch_status, u8) 117GET_STATUS(latch_status, u8)
151GET_STATUS(adapter_status, u8) 118GET_STATUS(adapter_status, u8)
152GET_STATUS(address, u32)
153GET_STATUS(max_bus_speed, enum pci_bus_speed) 119GET_STATUS(max_bus_speed, enum pci_bus_speed)
154GET_STATUS(cur_bus_speed, enum pci_bus_speed) 120GET_STATUS(cur_bus_speed, enum pci_bus_speed)
155 121
156static ssize_t power_read_file (struct hotplug_slot *slot, char *buf) 122static ssize_t power_read_file(struct pci_slot *slot, char *buf)
157{ 123{
158 int retval; 124 int retval;
159 u8 value; 125 u8 value;
160 126
161 retval = get_power_status (slot, &value); 127 retval = get_power_status(slot->hotplug, &value);
162 if (retval) 128 if (retval)
163 goto exit; 129 goto exit;
164 retval = sprintf (buf, "%d\n", value); 130 retval = sprintf (buf, "%d\n", value);
@@ -166,9 +132,10 @@ exit:
166 return retval; 132 return retval;
167} 133}
168 134
169static ssize_t power_write_file (struct hotplug_slot *slot, const char *buf, 135static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
170 size_t count) 136 size_t count)
171{ 137{
138 struct hotplug_slot *slot = pci_slot->hotplug;
172 unsigned long lpower; 139 unsigned long lpower;
173 u8 power; 140 u8 power;
174 int retval = 0; 141 int retval = 0;
@@ -204,29 +171,30 @@ exit:
204 return count; 171 return count;
205} 172}
206 173
207static struct hotplug_slot_attribute hotplug_slot_attr_power = { 174static struct pci_slot_attribute hotplug_slot_attr_power = {
208 .attr = {.name = "power", .mode = S_IFREG | S_IRUGO | S_IWUSR}, 175 .attr = {.name = "power", .mode = S_IFREG | S_IRUGO | S_IWUSR},
209 .show = power_read_file, 176 .show = power_read_file,
210 .store = power_write_file 177 .store = power_write_file
211}; 178};
212 179
213static ssize_t attention_read_file (struct hotplug_slot *slot, char *buf) 180static ssize_t attention_read_file(struct pci_slot *slot, char *buf)
214{ 181{
215 int retval; 182 int retval;
216 u8 value; 183 u8 value;
217 184
218 retval = get_attention_status (slot, &value); 185 retval = get_attention_status(slot->hotplug, &value);
219 if (retval) 186 if (retval)
220 goto exit; 187 goto exit;
221 retval = sprintf (buf, "%d\n", value); 188 retval = sprintf(buf, "%d\n", value);
222 189
223exit: 190exit:
224 return retval; 191 return retval;
225} 192}
226 193
227static ssize_t attention_write_file (struct hotplug_slot *slot, const char *buf, 194static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
228 size_t count) 195 size_t count)
229{ 196{
197 struct hotplug_slot_ops *ops = slot->hotplug->ops;
230 unsigned long lattention; 198 unsigned long lattention;
231 u8 attention; 199 u8 attention;
232 int retval = 0; 200 int retval = 0;
@@ -235,13 +203,13 @@ static ssize_t attention_write_file (struct hotplug_slot *slot, const char *buf,
235 attention = (u8)(lattention & 0xff); 203 attention = (u8)(lattention & 0xff);
236 dbg (" - attention = %d\n", attention); 204 dbg (" - attention = %d\n", attention);
237 205
238 if (!try_module_get(slot->ops->owner)) { 206 if (!try_module_get(ops->owner)) {
239 retval = -ENODEV; 207 retval = -ENODEV;
240 goto exit; 208 goto exit;
241 } 209 }
242 if (slot->ops->set_attention_status) 210 if (ops->set_attention_status)
243 retval = slot->ops->set_attention_status(slot, attention); 211 retval = ops->set_attention_status(slot->hotplug, attention);
244 module_put(slot->ops->owner); 212 module_put(ops->owner);
245 213
246exit: 214exit:
247 if (retval) 215 if (retval)
@@ -249,18 +217,18 @@ exit:
249 return count; 217 return count;
250} 218}
251 219
252static struct hotplug_slot_attribute hotplug_slot_attr_attention = { 220static struct pci_slot_attribute hotplug_slot_attr_attention = {
253 .attr = {.name = "attention", .mode = S_IFREG | S_IRUGO | S_IWUSR}, 221 .attr = {.name = "attention", .mode = S_IFREG | S_IRUGO | S_IWUSR},
254 .show = attention_read_file, 222 .show = attention_read_file,
255 .store = attention_write_file 223 .store = attention_write_file
256}; 224};
257 225
258static ssize_t latch_read_file (struct hotplug_slot *slot, char *buf) 226static ssize_t latch_read_file(struct pci_slot *slot, char *buf)
259{ 227{
260 int retval; 228 int retval;
261 u8 value; 229 u8 value;
262 230
263 retval = get_latch_status (slot, &value); 231 retval = get_latch_status(slot->hotplug, &value);
264 if (retval) 232 if (retval)
265 goto exit; 233 goto exit;
266 retval = sprintf (buf, "%d\n", value); 234 retval = sprintf (buf, "%d\n", value);
@@ -269,17 +237,17 @@ exit:
269 return retval; 237 return retval;
270} 238}
271 239
272static struct hotplug_slot_attribute hotplug_slot_attr_latch = { 240static struct pci_slot_attribute hotplug_slot_attr_latch = {
273 .attr = {.name = "latch", .mode = S_IFREG | S_IRUGO}, 241 .attr = {.name = "latch", .mode = S_IFREG | S_IRUGO},
274 .show = latch_read_file, 242 .show = latch_read_file,
275}; 243};
276 244
277static ssize_t presence_read_file (struct hotplug_slot *slot, char *buf) 245static ssize_t presence_read_file(struct pci_slot *slot, char *buf)
278{ 246{
279 int retval; 247 int retval;
280 u8 value; 248 u8 value;
281 249
282 retval = get_adapter_status (slot, &value); 250 retval = get_adapter_status(slot->hotplug, &value);
283 if (retval) 251 if (retval)
284 goto exit; 252 goto exit;
285 retval = sprintf (buf, "%d\n", value); 253 retval = sprintf (buf, "%d\n", value);
@@ -288,42 +256,20 @@ exit:
288 return retval; 256 return retval;
289} 257}
290 258
291static struct hotplug_slot_attribute hotplug_slot_attr_presence = { 259static struct pci_slot_attribute hotplug_slot_attr_presence = {
292 .attr = {.name = "adapter", .mode = S_IFREG | S_IRUGO}, 260 .attr = {.name = "adapter", .mode = S_IFREG | S_IRUGO},
293 .show = presence_read_file, 261 .show = presence_read_file,
294}; 262};
295 263
296static ssize_t address_read_file (struct hotplug_slot *slot, char *buf)
297{
298 int retval;
299 u32 address;
300
301 retval = get_address (slot, &address);
302 if (retval)
303 goto exit;
304 retval = sprintf (buf, "%04x:%02x:%02x\n",
305 (address >> 16) & 0xffff,
306 (address >> 8) & 0xff,
307 address & 0xff);
308
309exit:
310 return retval;
311}
312
313static struct hotplug_slot_attribute hotplug_slot_attr_address = {
314 .attr = {.name = "address", .mode = S_IFREG | S_IRUGO},
315 .show = address_read_file,
316};
317
318static char *unknown_speed = "Unknown bus speed"; 264static char *unknown_speed = "Unknown bus speed";
319 265
320static ssize_t max_bus_speed_read_file (struct hotplug_slot *slot, char *buf) 266static ssize_t max_bus_speed_read_file(struct pci_slot *slot, char *buf)
321{ 267{
322 char *speed_string; 268 char *speed_string;
323 int retval; 269 int retval;
324 enum pci_bus_speed value; 270 enum pci_bus_speed value;
325 271
326 retval = get_max_bus_speed (slot, &value); 272 retval = get_max_bus_speed(slot->hotplug, &value);
327 if (retval) 273 if (retval)
328 goto exit; 274 goto exit;
329 275
@@ -338,18 +284,18 @@ exit:
338 return retval; 284 return retval;
339} 285}
340 286
341static struct hotplug_slot_attribute hotplug_slot_attr_max_bus_speed = { 287static struct pci_slot_attribute hotplug_slot_attr_max_bus_speed = {
342 .attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO}, 288 .attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO},
343 .show = max_bus_speed_read_file, 289 .show = max_bus_speed_read_file,
344}; 290};
345 291
346static ssize_t cur_bus_speed_read_file (struct hotplug_slot *slot, char *buf) 292static ssize_t cur_bus_speed_read_file(struct pci_slot *slot, char *buf)
347{ 293{
348 char *speed_string; 294 char *speed_string;
349 int retval; 295 int retval;
350 enum pci_bus_speed value; 296 enum pci_bus_speed value;
351 297
352 retval = get_cur_bus_speed (slot, &value); 298 retval = get_cur_bus_speed(slot->hotplug, &value);
353 if (retval) 299 if (retval)
354 goto exit; 300 goto exit;
355 301
@@ -364,14 +310,15 @@ exit:
364 return retval; 310 return retval;
365} 311}
366 312
367static struct hotplug_slot_attribute hotplug_slot_attr_cur_bus_speed = { 313static struct pci_slot_attribute hotplug_slot_attr_cur_bus_speed = {
368 .attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO}, 314 .attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO},
369 .show = cur_bus_speed_read_file, 315 .show = cur_bus_speed_read_file,
370}; 316};
371 317
372static ssize_t test_write_file (struct hotplug_slot *slot, const char *buf, 318static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
373 size_t count) 319 size_t count)
374{ 320{
321 struct hotplug_slot *slot = pci_slot->hotplug;
375 unsigned long ltest; 322 unsigned long ltest;
376 u32 test; 323 u32 test;
377 int retval = 0; 324 int retval = 0;
@@ -394,13 +341,14 @@ exit:
394 return count; 341 return count;
395} 342}
396 343
397static struct hotplug_slot_attribute hotplug_slot_attr_test = { 344static struct pci_slot_attribute hotplug_slot_attr_test = {
398 .attr = {.name = "test", .mode = S_IFREG | S_IRUGO | S_IWUSR}, 345 .attr = {.name = "test", .mode = S_IFREG | S_IRUGO | S_IWUSR},
399 .store = test_write_file 346 .store = test_write_file
400}; 347};
401 348
402static int has_power_file (struct hotplug_slot *slot) 349static int has_power_file(struct pci_slot *pci_slot)
403{ 350{
351 struct hotplug_slot *slot = pci_slot->hotplug;
404 if ((!slot) || (!slot->ops)) 352 if ((!slot) || (!slot->ops))
405 return -ENODEV; 353 return -ENODEV;
406 if ((slot->ops->enable_slot) || 354 if ((slot->ops->enable_slot) ||
@@ -410,8 +358,9 @@ static int has_power_file (struct hotplug_slot *slot)
410 return -ENOENT; 358 return -ENOENT;
411} 359}
412 360
413static int has_attention_file (struct hotplug_slot *slot) 361static int has_attention_file(struct pci_slot *pci_slot)
414{ 362{
363 struct hotplug_slot *slot = pci_slot->hotplug;
415 if ((!slot) || (!slot->ops)) 364 if ((!slot) || (!slot->ops))
416 return -ENODEV; 365 return -ENODEV;
417 if ((slot->ops->set_attention_status) || 366 if ((slot->ops->set_attention_status) ||
@@ -420,8 +369,9 @@ static int has_attention_file (struct hotplug_slot *slot)
420 return -ENOENT; 369 return -ENOENT;
421} 370}
422 371
423static int has_latch_file (struct hotplug_slot *slot) 372static int has_latch_file(struct pci_slot *pci_slot)
424{ 373{
374 struct hotplug_slot *slot = pci_slot->hotplug;
425 if ((!slot) || (!slot->ops)) 375 if ((!slot) || (!slot->ops))
426 return -ENODEV; 376 return -ENODEV;
427 if (slot->ops->get_latch_status) 377 if (slot->ops->get_latch_status)
@@ -429,8 +379,9 @@ static int has_latch_file (struct hotplug_slot *slot)
429 return -ENOENT; 379 return -ENOENT;
430} 380}
431 381
432static int has_adapter_file (struct hotplug_slot *slot) 382static int has_adapter_file(struct pci_slot *pci_slot)
433{ 383{
384 struct hotplug_slot *slot = pci_slot->hotplug;
434 if ((!slot) || (!slot->ops)) 385 if ((!slot) || (!slot->ops))
435 return -ENODEV; 386 return -ENODEV;
436 if (slot->ops->get_adapter_status) 387 if (slot->ops->get_adapter_status)
@@ -438,17 +389,9 @@ static int has_adapter_file (struct hotplug_slot *slot)
438 return -ENOENT; 389 return -ENOENT;
439} 390}
440 391
441static int has_address_file (struct hotplug_slot *slot) 392static int has_max_bus_speed_file(struct pci_slot *pci_slot)
442{
443 if ((!slot) || (!slot->ops))
444 return -ENODEV;
445 if (slot->ops->get_address)
446 return 0;
447 return -ENOENT;
448}
449
450static int has_max_bus_speed_file (struct hotplug_slot *slot)
451{ 393{
394 struct hotplug_slot *slot = pci_slot->hotplug;
452 if ((!slot) || (!slot->ops)) 395 if ((!slot) || (!slot->ops))
453 return -ENODEV; 396 return -ENODEV;
454 if (slot->ops->get_max_bus_speed) 397 if (slot->ops->get_max_bus_speed)
@@ -456,8 +399,9 @@ static int has_max_bus_speed_file (struct hotplug_slot *slot)
456 return -ENOENT; 399 return -ENOENT;
457} 400}
458 401
459static int has_cur_bus_speed_file (struct hotplug_slot *slot) 402static int has_cur_bus_speed_file(struct pci_slot *pci_slot)
460{ 403{
404 struct hotplug_slot *slot = pci_slot->hotplug;
461 if ((!slot) || (!slot->ops)) 405 if ((!slot) || (!slot->ops))
462 return -ENODEV; 406 return -ENODEV;
463 if (slot->ops->get_cur_bus_speed) 407 if (slot->ops->get_cur_bus_speed)
@@ -465,8 +409,9 @@ static int has_cur_bus_speed_file (struct hotplug_slot *slot)
465 return -ENOENT; 409 return -ENOENT;
466} 410}
467 411
468static int has_test_file (struct hotplug_slot *slot) 412static int has_test_file(struct pci_slot *pci_slot)
469{ 413{
414 struct hotplug_slot *slot = pci_slot->hotplug;
470 if ((!slot) || (!slot->ops)) 415 if ((!slot) || (!slot->ops))
471 return -ENODEV; 416 return -ENODEV;
472 if (slot->ops->hardware_test) 417 if (slot->ops->hardware_test)
@@ -474,7 +419,7 @@ static int has_test_file (struct hotplug_slot *slot)
474 return -ENOENT; 419 return -ENOENT;
475} 420}
476 421
477static int fs_add_slot (struct hotplug_slot *slot) 422static int fs_add_slot(struct pci_slot *slot)
478{ 423{
479 int retval = 0; 424 int retval = 0;
480 425
@@ -505,13 +450,6 @@ static int fs_add_slot (struct hotplug_slot *slot)
505 goto exit_adapter; 450 goto exit_adapter;
506 } 451 }
507 452
508 if (has_address_file(slot) == 0) {
509 retval = sysfs_create_file(&slot->kobj,
510 &hotplug_slot_attr_address.attr);
511 if (retval)
512 goto exit_address;
513 }
514
515 if (has_max_bus_speed_file(slot) == 0) { 453 if (has_max_bus_speed_file(slot) == 0) {
516 retval = sysfs_create_file(&slot->kobj, 454 retval = sysfs_create_file(&slot->kobj,
517 &hotplug_slot_attr_max_bus_speed.attr); 455 &hotplug_slot_attr_max_bus_speed.attr);
@@ -544,10 +482,6 @@ exit_cur_speed:
544 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); 482 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
545 483
546exit_max_speed: 484exit_max_speed:
547 if (has_address_file(slot) == 0)
548 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_address.attr);
549
550exit_address:
551 if (has_adapter_file(slot) == 0) 485 if (has_adapter_file(slot) == 0)
552 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); 486 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
553 487
@@ -567,7 +501,7 @@ exit:
567 return retval; 501 return retval;
568} 502}
569 503
570static void fs_remove_slot (struct hotplug_slot *slot) 504static void fs_remove_slot(struct pci_slot *slot)
571{ 505{
572 if (has_power_file(slot) == 0) 506 if (has_power_file(slot) == 0)
573 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr); 507 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
@@ -581,9 +515,6 @@ static void fs_remove_slot (struct hotplug_slot *slot)
581 if (has_adapter_file(slot) == 0) 515 if (has_adapter_file(slot) == 0)
582 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); 516 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
583 517
584 if (has_address_file(slot) == 0)
585 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_address.attr);
586
587 if (has_max_bus_speed_file(slot) == 0) 518 if (has_max_bus_speed_file(slot) == 0)
588 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); 519 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
589 520
@@ -599,27 +530,33 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
599 struct hotplug_slot *slot; 530 struct hotplug_slot *slot;
600 struct list_head *tmp; 531 struct list_head *tmp;
601 532
533 spin_lock(&pci_hotplug_slot_list_lock);
602 list_for_each (tmp, &pci_hotplug_slot_list) { 534 list_for_each (tmp, &pci_hotplug_slot_list) {
603 slot = list_entry (tmp, struct hotplug_slot, slot_list); 535 slot = list_entry (tmp, struct hotplug_slot, slot_list);
604 if (strcmp(slot->name, name) == 0) 536 if (strcmp(slot->name, name) == 0)
605 return slot; 537 goto out;
606 } 538 }
607 return NULL; 539 slot = NULL;
540out:
541 spin_unlock(&pci_hotplug_slot_list_lock);
542 return slot;
608} 543}
609 544
610/** 545/**
611 * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem 546 * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
547 * @bus: bus this slot is on
612 * @slot: pointer to the &struct hotplug_slot to register 548 * @slot: pointer to the &struct hotplug_slot to register
549 * @slot_nr: slot number
613 * 550 *
614 * Registers a hotplug slot with the pci hotplug subsystem, which will allow 551 * Registers a hotplug slot with the pci hotplug subsystem, which will allow
615 * userspace interaction to the slot. 552 * userspace interaction to the slot.
616 * 553 *
617 * Returns 0 if successful, anything else for an error. 554 * Returns 0 if successful, anything else for an error.
618 */ 555 */
619int pci_hp_register (struct hotplug_slot *slot) 556int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr)
620{ 557{
621 int result; 558 int result;
622 struct hotplug_slot *tmp; 559 struct pci_slot *pci_slot;
623 560
624 if (slot == NULL) 561 if (slot == NULL)
625 return -ENODEV; 562 return -ENODEV;
@@ -632,57 +569,89 @@ int pci_hp_register (struct hotplug_slot *slot)
632 } 569 }
633 570
634 /* Check if we have already registered a slot with the same name. */ 571 /* Check if we have already registered a slot with the same name. */
635 tmp = get_slot_from_name(slot->name); 572 if (get_slot_from_name(slot->name))
636 if (tmp)
637 return -EEXIST; 573 return -EEXIST;
638 574
639 slot->kobj.kset = pci_hotplug_slots_kset; 575 /*
640 result = kobject_init_and_add(&slot->kobj, &hotplug_slot_ktype, NULL, 576 * No problems if we call this interface from both ACPI_PCI_SLOT
641 "%s", slot->name); 577 * driver and call it here again. If we've already created the
642 if (result) { 578 * pci_slot, the interface will simply bump the refcount.
643 err("Unable to register kobject '%s'", slot->name); 579 */
644 return -EINVAL; 580 pci_slot = pci_create_slot(bus, slot_nr, slot->name);
581 if (IS_ERR(pci_slot))
582 return PTR_ERR(pci_slot);
583
584 if (pci_slot->hotplug) {
585 dbg("%s: already claimed\n", __func__);
586 pci_destroy_slot(pci_slot);
587 return -EBUSY;
645 } 588 }
646 589
647 list_add (&slot->slot_list, &pci_hotplug_slot_list); 590 slot->pci_slot = pci_slot;
591 pci_slot->hotplug = slot;
592
593 /*
594 * Allow pcihp drivers to override the ACPI_PCI_SLOT name.
595 */
596 if (strcmp(kobject_name(&pci_slot->kobj), slot->name)) {
597 result = kobject_rename(&pci_slot->kobj, slot->name);
598 if (result) {
599 pci_destroy_slot(pci_slot);
600 return result;
601 }
602 }
603
604 spin_lock(&pci_hotplug_slot_list_lock);
605 list_add(&slot->slot_list, &pci_hotplug_slot_list);
606 spin_unlock(&pci_hotplug_slot_list_lock);
607
608 result = fs_add_slot(pci_slot);
609 kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
610 dbg("Added slot %s to the list\n", slot->name);
611
648 612
649 result = fs_add_slot (slot);
650 kobject_uevent(&slot->kobj, KOBJ_ADD);
651 dbg ("Added slot %s to the list\n", slot->name);
652 return result; 613 return result;
653} 614}
654 615
655/** 616/**
656 * pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem 617 * pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem
657 * @slot: pointer to the &struct hotplug_slot to deregister 618 * @hotplug: pointer to the &struct hotplug_slot to deregister
658 * 619 *
659 * The @slot must have been registered with the pci hotplug subsystem 620 * The @slot must have been registered with the pci hotplug subsystem
660 * previously with a call to pci_hp_register(). 621 * previously with a call to pci_hp_register().
661 * 622 *
662 * Returns 0 if successful, anything else for an error. 623 * Returns 0 if successful, anything else for an error.
663 */ 624 */
664int pci_hp_deregister (struct hotplug_slot *slot) 625int pci_hp_deregister(struct hotplug_slot *hotplug)
665{ 626{
666 struct hotplug_slot *temp; 627 struct hotplug_slot *temp;
628 struct pci_slot *slot;
667 629
668 if (slot == NULL) 630 if (!hotplug)
669 return -ENODEV; 631 return -ENODEV;
670 632
671 temp = get_slot_from_name (slot->name); 633 temp = get_slot_from_name(hotplug->name);
672 if (temp != slot) { 634 if (temp != hotplug)
673 return -ENODEV; 635 return -ENODEV;
674 }
675 list_del (&slot->slot_list);
676 636
677 fs_remove_slot (slot); 637 spin_lock(&pci_hotplug_slot_list_lock);
678 dbg ("Removed slot %s from the list\n", slot->name); 638 list_del(&hotplug->slot_list);
679 kobject_put(&slot->kobj); 639 spin_unlock(&pci_hotplug_slot_list_lock);
640
641 slot = hotplug->pci_slot;
642 fs_remove_slot(slot);
643 dbg("Removed slot %s from the list\n", hotplug->name);
644
645 hotplug->release(hotplug);
646 slot->hotplug = NULL;
647 pci_destroy_slot(slot);
648
680 return 0; 649 return 0;
681} 650}
682 651
683/** 652/**
684 * pci_hp_change_slot_info - changes the slot's information structure in the core 653 * pci_hp_change_slot_info - changes the slot's information structure in the core
685 * @slot: pointer to the slot whose info has changed 654 * @hotplug: pointer to the slot whose info has changed
686 * @info: pointer to the info copy into the slot's info structure 655 * @info: pointer to the info copy into the slot's info structure
687 * 656 *
688 * @slot must have been registered with the pci 657 * @slot must have been registered with the pci
@@ -690,13 +659,15 @@ int pci_hp_deregister (struct hotplug_slot *slot)
690 * 659 *
691 * Returns 0 if successful, anything else for an error. 660 * Returns 0 if successful, anything else for an error.
692 */ 661 */
693int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot, 662int __must_check pci_hp_change_slot_info(struct hotplug_slot *hotplug,
694 struct hotplug_slot_info *info) 663 struct hotplug_slot_info *info)
695{ 664{
696 if ((slot == NULL) || (info == NULL)) 665 struct pci_slot *slot;
666 if (!hotplug || !info)
697 return -ENODEV; 667 return -ENODEV;
668 slot = hotplug->pci_slot;
698 669
699 memcpy (slot->info, info, sizeof (struct hotplug_slot_info)); 670 memcpy(hotplug->info, info, sizeof(struct hotplug_slot_info));
700 671
701 return 0; 672 return 0;
702} 673}
@@ -704,36 +675,22 @@ int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot,
704static int __init pci_hotplug_init (void) 675static int __init pci_hotplug_init (void)
705{ 676{
706 int result; 677 int result;
707 struct kset *pci_bus_kset;
708 678
709 pci_bus_kset = bus_get_kset(&pci_bus_type);
710
711 pci_hotplug_slots_kset = kset_create_and_add("slots", NULL,
712 &pci_bus_kset->kobj);
713 if (!pci_hotplug_slots_kset) {
714 result = -ENOMEM;
715 err("Register subsys error\n");
716 goto exit;
717 }
718 result = cpci_hotplug_init(debug); 679 result = cpci_hotplug_init(debug);
719 if (result) { 680 if (result) {
720 err ("cpci_hotplug_init with error %d\n", result); 681 err ("cpci_hotplug_init with error %d\n", result);
721 goto err_subsys; 682 goto err_cpci;
722 } 683 }
723 684
724 info (DRIVER_DESC " version: " DRIVER_VERSION "\n"); 685 info (DRIVER_DESC " version: " DRIVER_VERSION "\n");
725 goto exit;
726 686
727err_subsys: 687err_cpci:
728 kset_unregister(pci_hotplug_slots_kset);
729exit:
730 return result; 688 return result;
731} 689}
732 690
733static void __exit pci_hotplug_exit (void) 691static void __exit pci_hotplug_exit (void)
734{ 692{
735 cpci_hotplug_exit(); 693 cpci_hotplug_exit();
736 kset_unregister(pci_hotplug_slots_kset);
737} 694}
738 695
739module_init(pci_hotplug_init); 696module_init(pci_hotplug_init);
@@ -745,7 +702,6 @@ MODULE_LICENSE("GPL");
745module_param(debug, bool, 0644); 702module_param(debug, bool, 0644);
746MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); 703MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
747 704
748EXPORT_SYMBOL_GPL(pci_hotplug_slots_kset);
749EXPORT_SYMBOL_GPL(pci_hp_register); 705EXPORT_SYMBOL_GPL(pci_hp_register);
750EXPORT_SYMBOL_GPL(pci_hp_deregister); 706EXPORT_SYMBOL_GPL(pci_hp_deregister);
751EXPORT_SYMBOL_GPL(pci_hp_change_slot_info); 707EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 79c9ddaad3fb..e3a1e7e7dba2 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -43,6 +43,7 @@ extern int pciehp_poll_mode;
43extern int pciehp_poll_time; 43extern int pciehp_poll_time;
44extern int pciehp_debug; 44extern int pciehp_debug;
45extern int pciehp_force; 45extern int pciehp_force;
46extern int pciehp_slot_with_bus;
46extern struct workqueue_struct *pciehp_wq; 47extern struct workqueue_struct *pciehp_wq;
47 48
48#define dbg(format, arg...) \ 49#define dbg(format, arg...) \
@@ -96,7 +97,7 @@ struct controller {
96 u32 slot_cap; 97 u32 slot_cap;
97 u8 cap_base; 98 u8 cap_base;
98 struct timer_list poll_timer; 99 struct timer_list poll_timer;
99 volatile int cmd_busy; 100 int cmd_busy;
100 unsigned int no_cmd_complete:1; 101 unsigned int no_cmd_complete:1;
101}; 102};
102 103
@@ -156,10 +157,10 @@ extern u8 pciehp_handle_power_fault(struct slot *p_slot);
156extern int pciehp_configure_device(struct slot *p_slot); 157extern int pciehp_configure_device(struct slot *p_slot);
157extern int pciehp_unconfigure_device(struct slot *p_slot); 158extern int pciehp_unconfigure_device(struct slot *p_slot);
158extern void pciehp_queue_pushbutton_work(struct work_struct *work); 159extern void pciehp_queue_pushbutton_work(struct work_struct *work);
159int pcie_init(struct controller *ctrl, struct pcie_device *dev); 160struct controller *pcie_init(struct pcie_device *dev);
160int pciehp_enable_slot(struct slot *p_slot); 161int pciehp_enable_slot(struct slot *p_slot);
161int pciehp_disable_slot(struct slot *p_slot); 162int pciehp_disable_slot(struct slot *p_slot);
162int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev); 163int pcie_enable_notification(struct controller *ctrl);
163 164
164static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) 165static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
165{ 166{
@@ -202,8 +203,13 @@ struct hpc_ops {
202#include <acpi/actypes.h> 203#include <acpi/actypes.h>
203#include <linux/pci-acpi.h> 204#include <linux/pci-acpi.h>
204 205
205#define pciehp_get_hp_hw_control_from_firmware(dev) \ 206static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
206 pciehp_acpi_get_hp_hw_control_from_firmware(dev) 207{
208 u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
209 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
210 return acpi_get_hp_hw_control_from_firmware(dev, flags);
211}
212
207static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, 213static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
208 struct hotplug_params *hpp) 214 struct hotplug_params *hpp)
209{ 215{
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 48a2ed378914..3677495c4f91 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -72,7 +72,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
72static int get_attention_status (struct hotplug_slot *slot, u8 *value); 72static int get_attention_status (struct hotplug_slot *slot, u8 *value);
73static int get_latch_status (struct hotplug_slot *slot, u8 *value); 73static int get_latch_status (struct hotplug_slot *slot, u8 *value);
74static int get_adapter_status (struct hotplug_slot *slot, u8 *value); 74static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
75static int get_address (struct hotplug_slot *slot, u32 *value);
76static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 75static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
77static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 76static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
78 77
@@ -85,7 +84,6 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
85 .get_attention_status = get_attention_status, 84 .get_attention_status = get_attention_status,
86 .get_latch_status = get_latch_status, 85 .get_latch_status = get_latch_status,
87 .get_adapter_status = get_adapter_status, 86 .get_adapter_status = get_adapter_status,
88 .get_address = get_address,
89 .get_max_bus_speed = get_max_bus_speed, 87 .get_max_bus_speed = get_max_bus_speed,
90 .get_cur_bus_speed = get_cur_bus_speed, 88 .get_cur_bus_speed = get_cur_bus_speed,
91}; 89};
@@ -185,23 +183,10 @@ static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
185 */ 183 */
186static void release_slot(struct hotplug_slot *hotplug_slot) 184static void release_slot(struct hotplug_slot *hotplug_slot)
187{ 185{
188 struct slot *slot = hotplug_slot->private;
189
190 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 186 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
191 187
192 kfree(slot->hotplug_slot->info); 188 kfree(hotplug_slot->info);
193 kfree(slot->hotplug_slot); 189 kfree(hotplug_slot);
194 kfree(slot);
195}
196
197static void make_slot_name(struct slot *slot)
198{
199 if (pciehp_slot_with_bus)
200 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d",
201 slot->bus, slot->number);
202 else
203 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d",
204 slot->number);
205} 190}
206 191
207static int init_slots(struct controller *ctrl) 192static int init_slots(struct controller *ctrl)
@@ -210,49 +195,34 @@ static int init_slots(struct controller *ctrl)
210 struct hotplug_slot *hotplug_slot; 195 struct hotplug_slot *hotplug_slot;
211 struct hotplug_slot_info *info; 196 struct hotplug_slot_info *info;
212 int retval = -ENOMEM; 197 int retval = -ENOMEM;
213 int i;
214
215 for (i = 0; i < ctrl->num_slots; i++) {
216 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
217 if (!slot)
218 goto error;
219 198
199 list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
220 hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL); 200 hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
221 if (!hotplug_slot) 201 if (!hotplug_slot)
222 goto error_slot; 202 goto error;
223 slot->hotplug_slot = hotplug_slot;
224 203
225 info = kzalloc(sizeof(*info), GFP_KERNEL); 204 info = kzalloc(sizeof(*info), GFP_KERNEL);
226 if (!info) 205 if (!info)
227 goto error_hpslot; 206 goto error_hpslot;
228 hotplug_slot->info = info;
229
230 hotplug_slot->name = slot->name;
231
232 slot->hp_slot = i;
233 slot->ctrl = ctrl;
234 slot->bus = ctrl->pci_dev->subordinate->number;
235 slot->device = ctrl->slot_device_offset + i;
236 slot->hpc_ops = ctrl->hpc_ops;
237 slot->number = ctrl->first_slot;
238 mutex_init(&slot->lock);
239 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
240 207
241 /* register this slot with the hotplug pci core */ 208 /* register this slot with the hotplug pci core */
209 hotplug_slot->info = info;
210 hotplug_slot->name = slot->name;
242 hotplug_slot->private = slot; 211 hotplug_slot->private = slot;
243 hotplug_slot->release = &release_slot; 212 hotplug_slot->release = &release_slot;
244 make_slot_name(slot);
245 hotplug_slot->ops = &pciehp_hotplug_slot_ops; 213 hotplug_slot->ops = &pciehp_hotplug_slot_ops;
246
247 get_power_status(hotplug_slot, &info->power_status); 214 get_power_status(hotplug_slot, &info->power_status);
248 get_attention_status(hotplug_slot, &info->attention_status); 215 get_attention_status(hotplug_slot, &info->attention_status);
249 get_latch_status(hotplug_slot, &info->latch_status); 216 get_latch_status(hotplug_slot, &info->latch_status);
250 get_adapter_status(hotplug_slot, &info->adapter_status); 217 get_adapter_status(hotplug_slot, &info->adapter_status);
218 slot->hotplug_slot = hotplug_slot;
251 219
252 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " 220 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
253 "slot_device_offset=%x\n", slot->bus, slot->device, 221 "slot_device_offset=%x\n", slot->bus, slot->device,
254 slot->hp_slot, slot->number, ctrl->slot_device_offset); 222 slot->hp_slot, slot->number, ctrl->slot_device_offset);
255 retval = pci_hp_register(hotplug_slot); 223 retval = pci_hp_register(hotplug_slot,
224 ctrl->pci_dev->subordinate,
225 slot->device);
256 if (retval) { 226 if (retval) {
257 err("pci_hp_register failed with error %d\n", retval); 227 err("pci_hp_register failed with error %d\n", retval);
258 if (retval == -EEXIST) 228 if (retval == -EEXIST)
@@ -263,7 +233,7 @@ static int init_slots(struct controller *ctrl)
263 } 233 }
264 /* create additional sysfs entries */ 234 /* create additional sysfs entries */
265 if (EMI(ctrl)) { 235 if (EMI(ctrl)) {
266 retval = sysfs_create_file(&hotplug_slot->kobj, 236 retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
267 &hotplug_slot_attr_lock.attr); 237 &hotplug_slot_attr_lock.attr);
268 if (retval) { 238 if (retval) {
269 pci_hp_deregister(hotplug_slot); 239 pci_hp_deregister(hotplug_slot);
@@ -271,8 +241,6 @@ static int init_slots(struct controller *ctrl)
271 goto error_info; 241 goto error_info;
272 } 242 }
273 } 243 }
274
275 list_add(&slot->slot_list, &ctrl->slot_list);
276 } 244 }
277 245
278 return 0; 246 return 0;
@@ -280,27 +248,18 @@ error_info:
280 kfree(info); 248 kfree(info);
281error_hpslot: 249error_hpslot:
282 kfree(hotplug_slot); 250 kfree(hotplug_slot);
283error_slot:
284 kfree(slot);
285error: 251error:
286 return retval; 252 return retval;
287} 253}
288 254
289static void cleanup_slots(struct controller *ctrl) 255static void cleanup_slots(struct controller *ctrl)
290{ 256{
291 struct list_head *tmp;
292 struct list_head *next;
293 struct slot *slot; 257 struct slot *slot;
294 258
295 list_for_each_safe(tmp, next, &ctrl->slot_list) { 259 list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
296 slot = list_entry(tmp, struct slot, slot_list);
297 list_del(&slot->slot_list);
298 if (EMI(ctrl)) 260 if (EMI(ctrl))
299 sysfs_remove_file(&slot->hotplug_slot->kobj, 261 sysfs_remove_file(&slot->hotplug_slot->pci_slot->kobj,
300 &hotplug_slot_attr_lock.attr); 262 &hotplug_slot_attr_lock.attr);
301 cancel_delayed_work(&slot->work);
302 flush_scheduled_work();
303 flush_workqueue(pciehp_wq);
304 pci_hp_deregister(slot->hotplug_slot); 263 pci_hp_deregister(slot->hotplug_slot);
305 } 264 }
306} 265}
@@ -398,19 +357,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
398 return 0; 357 return 0;
399} 358}
400 359
401static int get_address(struct hotplug_slot *hotplug_slot, u32 *value) 360static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
402{ 361 enum pci_bus_speed *value)
403 struct slot *slot = hotplug_slot->private;
404 struct pci_bus *bus = slot->ctrl->pci_dev->subordinate;
405
406 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
407
408 *value = (pci_domain_nr(bus) << 16) | (slot->bus << 8) | slot->device;
409
410 return 0;
411}
412
413static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
414{ 362{
415 struct slot *slot = hotplug_slot->private; 363 struct slot *slot = hotplug_slot->private;
416 int retval; 364 int retval;
@@ -444,34 +392,30 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
444 struct controller *ctrl; 392 struct controller *ctrl;
445 struct slot *t_slot; 393 struct slot *t_slot;
446 u8 value; 394 u8 value;
447 struct pci_dev *pdev; 395 struct pci_dev *pdev = dev->port;
448 396
449 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 397 if (pciehp_force)
450 if (!ctrl) { 398 dbg("Bypassing BIOS check for pciehp use on %s\n",
451 err("%s : out of memory\n", __func__); 399 pci_name(pdev));
400 else if (pciehp_get_hp_hw_control_from_firmware(pdev))
452 goto err_out_none; 401 goto err_out_none;
453 }
454 INIT_LIST_HEAD(&ctrl->slot_list);
455
456 pdev = dev->port;
457 ctrl->pci_dev = pdev;
458 402
459 rc = pcie_init(ctrl, dev); 403 ctrl = pcie_init(dev);
460 if (rc) { 404 if (!ctrl) {
461 dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME); 405 dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME);
462 goto err_out_free_ctrl; 406 goto err_out_none;
463 } 407 }
464 408 set_service_data(dev, ctrl);
465 pci_set_drvdata(pdev, ctrl);
466
467 dbg("%s: ctrl bus=0x%x, device=%x, function=%x, irq=%x\n",
468 __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
469 PCI_FUNC(pdev->devfn), pdev->irq);
470 409
471 /* Setup the slot information structures */ 410 /* Setup the slot information structures */
472 rc = init_slots(ctrl); 411 rc = init_slots(ctrl);
473 if (rc) { 412 if (rc) {
474 err("%s: slot initialization failed\n", PCIE_MODULE_NAME); 413 if (rc == -EBUSY)
414 warn("%s: slot already registered by another "
415 "hotplug driver\n", PCIE_MODULE_NAME);
416 else
417 err("%s: slot initialization failed\n",
418 PCIE_MODULE_NAME);
475 goto err_out_release_ctlr; 419 goto err_out_release_ctlr;
476 } 420 }
477 421
@@ -495,20 +439,16 @@ err_out_free_ctrl_slot:
495 cleanup_slots(ctrl); 439 cleanup_slots(ctrl);
496err_out_release_ctlr: 440err_out_release_ctlr:
497 ctrl->hpc_ops->release_ctlr(ctrl); 441 ctrl->hpc_ops->release_ctlr(ctrl);
498err_out_free_ctrl:
499 kfree(ctrl);
500err_out_none: 442err_out_none:
501 return -ENODEV; 443 return -ENODEV;
502} 444}
503 445
504static void pciehp_remove (struct pcie_device *dev) 446static void pciehp_remove (struct pcie_device *dev)
505{ 447{
506 struct pci_dev *pdev = dev->port; 448 struct controller *ctrl = get_service_data(dev);
507 struct controller *ctrl = pci_get_drvdata(pdev);
508 449
509 cleanup_slots(ctrl); 450 cleanup_slots(ctrl);
510 ctrl->hpc_ops->release_ctlr(ctrl); 451 ctrl->hpc_ops->release_ctlr(ctrl);
511 kfree(ctrl);
512} 452}
513 453
514#ifdef CONFIG_PM 454#ifdef CONFIG_PM
@@ -522,13 +462,12 @@ static int pciehp_resume (struct pcie_device *dev)
522{ 462{
523 printk("%s ENTRY\n", __func__); 463 printk("%s ENTRY\n", __func__);
524 if (pciehp_force) { 464 if (pciehp_force) {
525 struct pci_dev *pdev = dev->port; 465 struct controller *ctrl = get_service_data(dev);
526 struct controller *ctrl = pci_get_drvdata(pdev);
527 struct slot *t_slot; 466 struct slot *t_slot;
528 u8 status; 467 u8 status;
529 468
530 /* reinitialize the chipset's event detection logic */ 469 /* reinitialize the chipset's event detection logic */
531 pcie_init_hardware_part2(ctrl, dev); 470 pcie_enable_notification(ctrl);
532 471
533 t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); 472 t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
534 473
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 79f104963166..1323a43285d7 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -247,30 +247,32 @@ static inline void pciehp_free_irq(struct controller *ctrl)
247 free_irq(ctrl->pci_dev->irq, ctrl); 247 free_irq(ctrl->pci_dev->irq, ctrl);
248} 248}
249 249
250static inline int pcie_poll_cmd(struct controller *ctrl) 250static int pcie_poll_cmd(struct controller *ctrl)
251{ 251{
252 u16 slot_status; 252 u16 slot_status;
253 int timeout = 1000; 253 int timeout = 1000;
254 254
255 if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) 255 if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) {
256 if (slot_status & CMD_COMPLETED) 256 if (slot_status & CMD_COMPLETED) {
257 goto completed; 257 pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
258 for (timeout = 1000; timeout > 0; timeout -= 100) { 258 return 1;
259 msleep(100); 259 }
260 if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) 260 }
261 if (slot_status & CMD_COMPLETED) 261 while (timeout > 1000) {
262 goto completed; 262 msleep(10);
263 timeout -= 10;
264 if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) {
265 if (slot_status & CMD_COMPLETED) {
266 pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
267 return 1;
268 }
269 }
263 } 270 }
264 return 0; /* timeout */ 271 return 0; /* timeout */
265
266completed:
267 pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
268 return timeout;
269} 272}
270 273
271static inline int pcie_wait_cmd(struct controller *ctrl, int poll) 274static void pcie_wait_cmd(struct controller *ctrl, int poll)
272{ 275{
273 int retval = 0;
274 unsigned int msecs = pciehp_poll_mode ? 2500 : 1000; 276 unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
275 unsigned long timeout = msecs_to_jiffies(msecs); 277 unsigned long timeout = msecs_to_jiffies(msecs);
276 int rc; 278 int rc;
@@ -278,16 +280,9 @@ static inline int pcie_wait_cmd(struct controller *ctrl, int poll)
278 if (poll) 280 if (poll)
279 rc = pcie_poll_cmd(ctrl); 281 rc = pcie_poll_cmd(ctrl);
280 else 282 else
281 rc = wait_event_interruptible_timeout(ctrl->queue, 283 rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
282 !ctrl->cmd_busy, timeout);
283 if (!rc) 284 if (!rc)
284 dbg("Command not completed in 1000 msec\n"); 285 dbg("Command not completed in 1000 msec\n");
285 else if (rc < 0) {
286 retval = -EINTR;
287 info("Command was interrupted by a signal\n");
288 }
289
290 return retval;
291} 286}
292 287
293/** 288/**
@@ -342,10 +337,6 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
342 337
343 slot_ctrl &= ~mask; 338 slot_ctrl &= ~mask;
344 slot_ctrl |= (cmd & mask); 339 slot_ctrl |= (cmd & mask);
345 /* Don't enable command completed if caller is changing it. */
346 if (!(mask & CMD_CMPL_INTR_ENABLE))
347 slot_ctrl |= CMD_CMPL_INTR_ENABLE;
348
349 ctrl->cmd_busy = 1; 340 ctrl->cmd_busy = 1;
350 smp_mb(); 341 smp_mb();
351 retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); 342 retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl);
@@ -365,7 +356,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
365 if (!(slot_ctrl & HP_INTR_ENABLE) || 356 if (!(slot_ctrl & HP_INTR_ENABLE) ||
366 !(slot_ctrl & CMD_CMPL_INTR_ENABLE)) 357 !(slot_ctrl & CMD_CMPL_INTR_ENABLE))
367 poll = 1; 358 poll = 1;
368 retval = pcie_wait_cmd(ctrl, poll); 359 pcie_wait_cmd(ctrl, poll);
369 } 360 }
370 out: 361 out:
371 mutex_unlock(&ctrl->ctrl_lock); 362 mutex_unlock(&ctrl->ctrl_lock);
@@ -614,23 +605,6 @@ static void hpc_set_green_led_blink(struct slot *slot)
614 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 605 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
615} 606}
616 607
617static void hpc_release_ctlr(struct controller *ctrl)
618{
619 /* Mask Hot-plug Interrupt Enable */
620 if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE))
621 err("%s: Cannot mask hotplut interrupt enable\n", __func__);
622
623 /* Free interrupt handler or interrupt polling timer */
624 pciehp_free_irq(ctrl);
625
626 /*
627 * If this is the last controller to be released, destroy the
628 * pciehp work queue
629 */
630 if (atomic_dec_and_test(&pciehp_num_controllers))
631 destroy_workqueue(pciehp_wq);
632}
633
634static int hpc_power_on_slot(struct slot * slot) 608static int hpc_power_on_slot(struct slot * slot)
635{ 609{
636 struct controller *ctrl = slot->ctrl; 610 struct controller *ctrl = slot->ctrl;
@@ -785,7 +759,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
785 intr_loc |= detected; 759 intr_loc |= detected;
786 if (!intr_loc) 760 if (!intr_loc)
787 return IRQ_NONE; 761 return IRQ_NONE;
788 if (pciehp_writew(ctrl, SLOTSTATUS, detected)) { 762 if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) {
789 err("%s: Cannot write to SLOTSTATUS\n", __func__); 763 err("%s: Cannot write to SLOTSTATUS\n", __func__);
790 return IRQ_NONE; 764 return IRQ_NONE;
791 } 765 }
@@ -797,25 +771,13 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
797 if (intr_loc & CMD_COMPLETED) { 771 if (intr_loc & CMD_COMPLETED) {
798 ctrl->cmd_busy = 0; 772 ctrl->cmd_busy = 0;
799 smp_mb(); 773 smp_mb();
800 wake_up_interruptible(&ctrl->queue); 774 wake_up(&ctrl->queue);
801 } 775 }
802 776
803 if (!(intr_loc & ~CMD_COMPLETED)) 777 if (!(intr_loc & ~CMD_COMPLETED))
804 return IRQ_HANDLED; 778 return IRQ_HANDLED;
805 779
806 /*
807 * Return without handling events if this handler routine is
808 * called before controller initialization is done. This may
809 * happen if hotplug event or another interrupt that shares
810 * the IRQ with pciehp arrives before slot initialization is
811 * done after interrupt handler is registered.
812 *
813 * FIXME - Need more structural fixes. We need to be ready to
814 * handle the event before installing interrupt handler.
815 */
816 p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); 780 p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
817 if (!p_slot || !p_slot->hpc_ops)
818 return IRQ_HANDLED;
819 781
820 /* Check MRL Sensor Changed */ 782 /* Check MRL Sensor Changed */
821 if (intr_loc & MRL_SENS_CHANGED) 783 if (intr_loc & MRL_SENS_CHANGED)
@@ -992,6 +954,7 @@ static int hpc_get_cur_lnk_width(struct slot *slot,
992 return retval; 954 return retval;
993} 955}
994 956
957static void pcie_release_ctrl(struct controller *ctrl);
995static struct hpc_ops pciehp_hpc_ops = { 958static struct hpc_ops pciehp_hpc_ops = {
996 .power_on_slot = hpc_power_on_slot, 959 .power_on_slot = hpc_power_on_slot,
997 .power_off_slot = hpc_power_off_slot, 960 .power_off_slot = hpc_power_off_slot,
@@ -1013,97 +976,11 @@ static struct hpc_ops pciehp_hpc_ops = {
1013 .green_led_off = hpc_set_green_led_off, 976 .green_led_off = hpc_set_green_led_off,
1014 .green_led_blink = hpc_set_green_led_blink, 977 .green_led_blink = hpc_set_green_led_blink,
1015 978
1016 .release_ctlr = hpc_release_ctlr, 979 .release_ctlr = pcie_release_ctrl,
1017 .check_lnk_status = hpc_check_lnk_status, 980 .check_lnk_status = hpc_check_lnk_status,
1018}; 981};
1019 982
1020#ifdef CONFIG_ACPI 983int pcie_enable_notification(struct controller *ctrl)
1021static int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
1022{
1023 acpi_status status;
1024 acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev));
1025 struct pci_dev *pdev = dev;
1026 struct pci_bus *parent;
1027 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
1028
1029 /*
1030 * Per PCI firmware specification, we should run the ACPI _OSC
1031 * method to get control of hotplug hardware before using it.
1032 * If an _OSC is missing, we look for an OSHP to do the same thing.
1033 * To handle different BIOS behavior, we look for _OSC and OSHP
1034 * within the scope of the hotplug controller and its parents, upto
1035 * the host bridge under which this controller exists.
1036 */
1037 while (!handle) {
1038 /*
1039 * This hotplug controller was not listed in the ACPI name
1040 * space at all. Try to get acpi handle of parent pci bus.
1041 */
1042 if (!pdev || !pdev->bus->parent)
1043 break;
1044 parent = pdev->bus->parent;
1045 dbg("Could not find %s in acpi namespace, trying parent\n",
1046 pci_name(pdev));
1047 if (!parent->self)
1048 /* Parent must be a host bridge */
1049 handle = acpi_get_pci_rootbridge_handle(
1050 pci_domain_nr(parent),
1051 parent->number);
1052 else
1053 handle = DEVICE_ACPI_HANDLE(
1054 &(parent->self->dev));
1055 pdev = parent->self;
1056 }
1057
1058 while (handle) {
1059 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
1060 dbg("Trying to get hotplug control for %s \n",
1061 (char *)string.pointer);
1062 status = pci_osc_control_set(handle,
1063 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL |
1064 OSC_PCI_EXPRESS_NATIVE_HP_CONTROL);
1065 if (status == AE_NOT_FOUND)
1066 status = acpi_run_oshp(handle);
1067 if (ACPI_SUCCESS(status)) {
1068 dbg("Gained control for hotplug HW for pci %s (%s)\n",
1069 pci_name(dev), (char *)string.pointer);
1070 kfree(string.pointer);
1071 return 0;
1072 }
1073 if (acpi_root_bridge(handle))
1074 break;
1075 chandle = handle;
1076 status = acpi_get_parent(chandle, &handle);
1077 if (ACPI_FAILURE(status))
1078 break;
1079 }
1080
1081 dbg("Cannot get control of hotplug hardware for pci %s\n",
1082 pci_name(dev));
1083
1084 kfree(string.pointer);
1085 return -1;
1086}
1087#endif
1088
1089static int pcie_init_hardware_part1(struct controller *ctrl,
1090 struct pcie_device *dev)
1091{
1092 /* Clear all remaining event bits in Slot Status register */
1093 if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) {
1094 err("%s: Cannot write to SLOTSTATUS register\n", __func__);
1095 return -1;
1096 }
1097
1098 /* Mask Hot-plug Interrupt Enable */
1099 if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE)) {
1100 err("%s: Cannot mask hotplug interrupt enable\n", __func__);
1101 return -1;
1102 }
1103 return 0;
1104}
1105
1106int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev)
1107{ 984{
1108 u16 cmd, mask; 985 u16 cmd, mask;
1109 986
@@ -1115,30 +992,83 @@ int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev)
1115 if (MRL_SENS(ctrl)) 992 if (MRL_SENS(ctrl))
1116 cmd |= MRL_DETECT_ENABLE; 993 cmd |= MRL_DETECT_ENABLE;
1117 if (!pciehp_poll_mode) 994 if (!pciehp_poll_mode)
1118 cmd |= HP_INTR_ENABLE; 995 cmd |= HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
1119 996
1120 mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | 997 mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
1121 PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | HP_INTR_ENABLE; 998 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
1122 999
1123 if (pcie_write_cmd(ctrl, cmd, mask)) { 1000 if (pcie_write_cmd(ctrl, cmd, mask)) {
1124 err("%s: Cannot enable software notification\n", __func__); 1001 err("%s: Cannot enable software notification\n", __func__);
1125 goto abort; 1002 return -1;
1126 } 1003 }
1004 return 0;
1005}
1127 1006
1128 if (pciehp_force) 1007static void pcie_disable_notification(struct controller *ctrl)
1129 dbg("Bypassing BIOS check for pciehp use on %s\n", 1008{
1130 pci_name(ctrl->pci_dev)); 1009 u16 mask;
1131 else if (pciehp_get_hp_hw_control_from_firmware(ctrl->pci_dev)) 1010 mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
1132 goto abort_disable_intr; 1011 PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
1012 if (pcie_write_cmd(ctrl, 0, mask))
1013 warn("%s: Cannot disable software notification\n", __func__);
1014}
1133 1015
1016static int pcie_init_notification(struct controller *ctrl)
1017{
1018 if (pciehp_request_irq(ctrl))
1019 return -1;
1020 if (pcie_enable_notification(ctrl)) {
1021 pciehp_free_irq(ctrl);
1022 return -1;
1023 }
1134 return 0; 1024 return 0;
1025}
1135 1026
1136 /* We end up here for the many possible ways to fail this API. */ 1027static void pcie_shutdown_notification(struct controller *ctrl)
1137abort_disable_intr: 1028{
1138 if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE)) 1029 pcie_disable_notification(ctrl);
1139 err("%s : disabling interrupts failed\n", __func__); 1030 pciehp_free_irq(ctrl);
1140abort: 1031}
1141 return -1; 1032
1033static void make_slot_name(struct slot *slot)
1034{
1035 if (pciehp_slot_with_bus)
1036 snprintf(slot->name, SLOT_NAME_SIZE, "%04d_%04d",
1037 slot->bus, slot->number);
1038 else
1039 snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
1040}
1041
1042static int pcie_init_slot(struct controller *ctrl)
1043{
1044 struct slot *slot;
1045
1046 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
1047 if (!slot)
1048 return -ENOMEM;
1049
1050 slot->hp_slot = 0;
1051 slot->ctrl = ctrl;
1052 slot->bus = ctrl->pci_dev->subordinate->number;
1053 slot->device = ctrl->slot_device_offset + slot->hp_slot;
1054 slot->hpc_ops = ctrl->hpc_ops;
1055 slot->number = ctrl->first_slot;
1056 make_slot_name(slot);
1057 mutex_init(&slot->lock);
1058 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
1059 list_add(&slot->slot_list, &ctrl->slot_list);
1060 return 0;
1061}
1062
1063static void pcie_cleanup_slot(struct controller *ctrl)
1064{
1065 struct slot *slot;
1066 slot = list_first_entry(&ctrl->slot_list, struct slot, slot_list);
1067 list_del(&slot->slot_list);
1068 cancel_delayed_work(&slot->work);
1069 flush_scheduled_work();
1070 flush_workqueue(pciehp_wq);
1071 kfree(slot);
1142} 1072}
1143 1073
1144static inline void dbg_ctrl(struct controller *ctrl) 1074static inline void dbg_ctrl(struct controller *ctrl)
@@ -1176,15 +1106,23 @@ static inline void dbg_ctrl(struct controller *ctrl)
1176 dbg(" Comamnd Completed : %3s\n", NO_CMD_CMPL(ctrl)? "no" : "yes"); 1106 dbg(" Comamnd Completed : %3s\n", NO_CMD_CMPL(ctrl)? "no" : "yes");
1177 pciehp_readw(ctrl, SLOTSTATUS, &reg16); 1107 pciehp_readw(ctrl, SLOTSTATUS, &reg16);
1178 dbg("Slot Status : 0x%04x\n", reg16); 1108 dbg("Slot Status : 0x%04x\n", reg16);
1179 pciehp_readw(ctrl, SLOTSTATUS, &reg16); 1109 pciehp_readw(ctrl, SLOTCTRL, &reg16);
1180 dbg("Slot Control : 0x%04x\n", reg16); 1110 dbg("Slot Control : 0x%04x\n", reg16);
1181} 1111}
1182 1112
1183int pcie_init(struct controller *ctrl, struct pcie_device *dev) 1113struct controller *pcie_init(struct pcie_device *dev)
1184{ 1114{
1115 struct controller *ctrl;
1185 u32 slot_cap; 1116 u32 slot_cap;
1186 struct pci_dev *pdev = dev->port; 1117 struct pci_dev *pdev = dev->port;
1187 1118
1119 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1120 if (!ctrl) {
1121 err("%s : out of memory\n", __func__);
1122 goto abort;
1123 }
1124 INIT_LIST_HEAD(&ctrl->slot_list);
1125
1188 ctrl->pci_dev = pdev; 1126 ctrl->pci_dev = pdev;
1189 ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); 1127 ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1190 if (!ctrl->cap_base) { 1128 if (!ctrl->cap_base) {
@@ -1215,15 +1153,12 @@ int pcie_init(struct controller *ctrl, struct pcie_device *dev)
1215 !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl))) 1153 !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
1216 ctrl->no_cmd_complete = 1; 1154 ctrl->no_cmd_complete = 1;
1217 1155
1218 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", 1156 /* Clear all remaining event bits in Slot Status register */
1219 pdev->vendor, pdev->device, 1157 if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f))
1220 pdev->subsystem_vendor, pdev->subsystem_device); 1158 goto abort_ctrl;
1221 1159
1222 if (pcie_init_hardware_part1(ctrl, dev)) 1160 /* Disable sotfware notification */
1223 goto abort; 1161 pcie_disable_notification(ctrl);
1224
1225 if (pciehp_request_irq(ctrl))
1226 goto abort;
1227 1162
1228 /* 1163 /*
1229 * If this is the first controller to be initialized, 1164 * If this is the first controller to be initialized,
@@ -1231,18 +1166,39 @@ int pcie_init(struct controller *ctrl, struct pcie_device *dev)
1231 */ 1166 */
1232 if (atomic_add_return(1, &pciehp_num_controllers) == 1) { 1167 if (atomic_add_return(1, &pciehp_num_controllers) == 1) {
1233 pciehp_wq = create_singlethread_workqueue("pciehpd"); 1168 pciehp_wq = create_singlethread_workqueue("pciehpd");
1234 if (!pciehp_wq) { 1169 if (!pciehp_wq)
1235 goto abort_free_irq; 1170 goto abort_ctrl;
1236 }
1237 } 1171 }
1238 1172
1239 if (pcie_init_hardware_part2(ctrl, dev)) 1173 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
1240 goto abort_free_irq; 1174 pdev->vendor, pdev->device,
1175 pdev->subsystem_vendor, pdev->subsystem_device);
1176
1177 if (pcie_init_slot(ctrl))
1178 goto abort_ctrl;
1241 1179
1242 return 0; 1180 if (pcie_init_notification(ctrl))
1181 goto abort_slot;
1243 1182
1244abort_free_irq: 1183 return ctrl;
1245 pciehp_free_irq(ctrl); 1184
1185abort_slot:
1186 pcie_cleanup_slot(ctrl);
1187abort_ctrl:
1188 kfree(ctrl);
1246abort: 1189abort:
1247 return -1; 1190 return NULL;
1191}
1192
1193void pcie_release_ctrl(struct controller *ctrl)
1194{
1195 pcie_shutdown_notification(ctrl);
1196 pcie_cleanup_slot(ctrl);
1197 /*
1198 * If this is the last controller to be released, destroy the
1199 * pciehp work queue
1200 */
1201 if (atomic_dec_and_test(&pciehp_num_controllers))
1202 destroy_workqueue(pciehp_wq);
1203 kfree(ctrl);
1248} 1204}
diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c
index 779c5db71be4..a796301ea03f 100644
--- a/drivers/pci/hotplug/rpadlpar_sysfs.c
+++ b/drivers/pci/hotplug/rpadlpar_sysfs.c
@@ -14,8 +14,10 @@
14 */ 14 */
15#include <linux/kobject.h> 15#include <linux/kobject.h>
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/pci.h>
17#include <linux/pci_hotplug.h> 18#include <linux/pci_hotplug.h>
18#include "rpadlpar.h" 19#include "rpadlpar.h"
20#include "../pci.h"
19 21
20#define DLPAR_KOBJ_NAME "control" 22#define DLPAR_KOBJ_NAME "control"
21 23
@@ -27,7 +29,6 @@
27 29
28#define MAX_DRC_NAME_LEN 64 30#define MAX_DRC_NAME_LEN 64
29 31
30
31static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr, 32static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr,
32 const char *buf, size_t nbytes) 33 const char *buf, size_t nbytes)
33{ 34{
@@ -112,7 +113,7 @@ int dlpar_sysfs_init(void)
112 int error; 113 int error;
113 114
114 dlpar_kobj = kobject_create_and_add(DLPAR_KOBJ_NAME, 115 dlpar_kobj = kobject_create_and_add(DLPAR_KOBJ_NAME,
115 &pci_hotplug_slots_kset->kobj); 116 &pci_slots_kset->kobj);
116 if (!dlpar_kobj) 117 if (!dlpar_kobj)
117 return -EINVAL; 118 return -EINVAL;
118 119
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 56197b600d36..9b714ea93d20 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -33,33 +33,6 @@
33#include <asm/rtas.h> 33#include <asm/rtas.h>
34#include "rpaphp.h" 34#include "rpaphp.h"
35 35
36static ssize_t address_read_file (struct hotplug_slot *php_slot, char *buf)
37{
38 int retval;
39 struct slot *slot = (struct slot *)php_slot->private;
40 struct pci_bus *bus;
41
42 if (!slot)
43 return -ENOENT;
44
45 bus = slot->bus;
46 if (!bus)
47 return -ENOENT;
48
49 if (bus->self)
50 retval = sprintf(buf, pci_name(bus->self));
51 else
52 retval = sprintf(buf, "%04x:%02x:00.0",
53 pci_domain_nr(bus), bus->number);
54
55 return retval;
56}
57
58static struct hotplug_slot_attribute php_attr_address = {
59 .attr = {.name = "address", .mode = S_IFREG | S_IRUGO},
60 .show = address_read_file,
61};
62
63/* free up the memory used by a slot */ 36/* free up the memory used by a slot */
64static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot) 37static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot)
65{ 38{
@@ -135,9 +108,6 @@ int rpaphp_deregister_slot(struct slot *slot)
135 108
136 list_del(&slot->rpaphp_slot_list); 109 list_del(&slot->rpaphp_slot_list);
137 110
138 /* remove "address" file */
139 sysfs_remove_file(&php_slot->kobj, &php_attr_address.attr);
140
141 retval = pci_hp_deregister(php_slot); 111 retval = pci_hp_deregister(php_slot);
142 if (retval) 112 if (retval)
143 err("Problem unregistering a slot %s\n", slot->name); 113 err("Problem unregistering a slot %s\n", slot->name);
@@ -151,6 +121,7 @@ int rpaphp_register_slot(struct slot *slot)
151{ 121{
152 struct hotplug_slot *php_slot = slot->hotplug_slot; 122 struct hotplug_slot *php_slot = slot->hotplug_slot;
153 int retval; 123 int retval;
124 int slotno;
154 125
155 dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n", 126 dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n",
156 __func__, slot->dn->full_name, slot->index, slot->name, 127 __func__, slot->dn->full_name, slot->index, slot->name,
@@ -162,19 +133,16 @@ int rpaphp_register_slot(struct slot *slot)
162 return -EAGAIN; 133 return -EAGAIN;
163 } 134 }
164 135
165 retval = pci_hp_register(php_slot); 136 if (slot->dn->child)
137 slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
138 else
139 slotno = -1;
140 retval = pci_hp_register(php_slot, slot->bus, slotno);
166 if (retval) { 141 if (retval) {
167 err("pci_hp_register failed with error %d\n", retval); 142 err("pci_hp_register failed with error %d\n", retval);
168 return retval; 143 return retval;
169 } 144 }
170 145
171 /* create "address" file */
172 retval = sysfs_create_file(&php_slot->kobj, &php_attr_address.attr);
173 if (retval) {
174 err("sysfs_create_file failed with error %d\n", retval);
175 goto sysfs_fail;
176 }
177
178 /* add slot to our internal list */ 146 /* add slot to our internal list */
179 list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head); 147 list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head);
180 info("Slot [%s] registered\n", slot->name); 148 info("Slot [%s] registered\n", slot->name);
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 2fe37cd85b69..410fe0394a8e 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -197,13 +197,15 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
197static struct hotplug_slot * sn_hp_destroy(void) 197static struct hotplug_slot * sn_hp_destroy(void)
198{ 198{
199 struct slot *slot; 199 struct slot *slot;
200 struct pci_slot *pci_slot;
200 struct hotplug_slot *bss_hotplug_slot = NULL; 201 struct hotplug_slot *bss_hotplug_slot = NULL;
201 202
202 list_for_each_entry(slot, &sn_hp_list, hp_list) { 203 list_for_each_entry(slot, &sn_hp_list, hp_list) {
203 bss_hotplug_slot = slot->hotplug_slot; 204 bss_hotplug_slot = slot->hotplug_slot;
205 pci_slot = bss_hotplug_slot->pci_slot;
204 list_del(&((struct slot *)bss_hotplug_slot->private)-> 206 list_del(&((struct slot *)bss_hotplug_slot->private)->
205 hp_list); 207 hp_list);
206 sysfs_remove_file(&bss_hotplug_slot->kobj, 208 sysfs_remove_file(&pci_slot->kobj,
207 &sn_slot_path_attr.attr); 209 &sn_slot_path_attr.attr);
208 break; 210 break;
209 } 211 }
@@ -614,6 +616,7 @@ static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
614static int sn_hotplug_slot_register(struct pci_bus *pci_bus) 616static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
615{ 617{
616 int device; 618 int device;
619 struct pci_slot *pci_slot;
617 struct hotplug_slot *bss_hotplug_slot; 620 struct hotplug_slot *bss_hotplug_slot;
618 int rc = 0; 621 int rc = 0;
619 622
@@ -650,11 +653,12 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
650 bss_hotplug_slot->ops = &sn_hotplug_slot_ops; 653 bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
651 bss_hotplug_slot->release = &sn_release_slot; 654 bss_hotplug_slot->release = &sn_release_slot;
652 655
653 rc = pci_hp_register(bss_hotplug_slot); 656 rc = pci_hp_register(bss_hotplug_slot, pci_bus, device);
654 if (rc) 657 if (rc)
655 goto register_err; 658 goto register_err;
656 659
657 rc = sysfs_create_file(&bss_hotplug_slot->kobj, 660 pci_slot = bss_hotplug_slot->pci_slot;
661 rc = sysfs_create_file(&pci_slot->kobj,
658 &sn_slot_path_attr.attr); 662 &sn_slot_path_attr.attr);
659 if (rc) 663 if (rc)
660 goto register_err; 664 goto register_err;
@@ -664,7 +668,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
664 668
665register_err: 669register_err:
666 dev_dbg(&pci_bus->self->dev, "bus failed to register with err = %d\n", 670 dev_dbg(&pci_bus->self->dev, "bus failed to register with err = %d\n",
667 rc); 671 rc);
668 672
669alloc_err: 673alloc_err:
670 if (rc == -ENOMEM) 674 if (rc == -ENOMEM)
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index f66e8d6315ab..8a026f750deb 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -170,6 +170,7 @@ extern void shpchp_queue_pushbutton_work(struct work_struct *work);
170extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev); 170extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev);
171 171
172#ifdef CONFIG_ACPI 172#ifdef CONFIG_ACPI
173#include <linux/pci-acpi.h>
173static inline int get_hp_params_from_firmware(struct pci_dev *dev, 174static inline int get_hp_params_from_firmware(struct pci_dev *dev,
174 struct hotplug_params *hpp) 175 struct hotplug_params *hpp)
175{ 176{
@@ -177,14 +178,15 @@ static inline int get_hp_params_from_firmware(struct pci_dev *dev,
177 return -ENODEV; 178 return -ENODEV;
178 return 0; 179 return 0;
179} 180}
180#define get_hp_hw_control_from_firmware(pdev) \ 181
181 do { \ 182static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
182 if (DEVICE_ACPI_HANDLE(&(pdev->dev))) \ 183{
183 acpi_run_oshp(DEVICE_ACPI_HANDLE(&(pdev->dev)));\ 184 u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;
184 } while (0) 185 return acpi_get_hp_hw_control_from_firmware(dev, flags);
186}
185#else 187#else
186#define get_hp_params_from_firmware(dev, hpp) (-ENODEV) 188#define get_hp_params_from_firmware(dev, hpp) (-ENODEV)
187#define get_hp_hw_control_from_firmware(dev) do { } while (0) 189#define get_hp_hw_control_from_firmware(dev) (0)
188#endif 190#endif
189 191
190struct ctrl_reg { 192struct ctrl_reg {
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 97848654652a..a8cbd039b85b 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -39,7 +39,7 @@
39int shpchp_debug; 39int shpchp_debug;
40int shpchp_poll_mode; 40int shpchp_poll_mode;
41int shpchp_poll_time; 41int shpchp_poll_time;
42int shpchp_slot_with_bus; 42static int shpchp_slot_with_bus;
43struct workqueue_struct *shpchp_wq; 43struct workqueue_struct *shpchp_wq;
44 44
45#define DRIVER_VERSION "0.4" 45#define DRIVER_VERSION "0.4"
@@ -68,7 +68,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
68static int get_attention_status (struct hotplug_slot *slot, u8 *value); 68static int get_attention_status (struct hotplug_slot *slot, u8 *value);
69static int get_latch_status (struct hotplug_slot *slot, u8 *value); 69static int get_latch_status (struct hotplug_slot *slot, u8 *value);
70static int get_adapter_status (struct hotplug_slot *slot, u8 *value); 70static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
71static int get_address (struct hotplug_slot *slot, u32 *value);
72static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 71static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
73static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 72static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
74 73
@@ -81,7 +80,6 @@ static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
81 .get_attention_status = get_attention_status, 80 .get_attention_status = get_attention_status,
82 .get_latch_status = get_latch_status, 81 .get_latch_status = get_latch_status,
83 .get_adapter_status = get_adapter_status, 82 .get_adapter_status = get_adapter_status,
84 .get_address = get_address,
85 .get_max_bus_speed = get_max_bus_speed, 83 .get_max_bus_speed = get_max_bus_speed,
86 .get_cur_bus_speed = get_cur_bus_speed, 84 .get_cur_bus_speed = get_cur_bus_speed,
87}; 85};
@@ -159,7 +157,8 @@ static int init_slots(struct controller *ctrl)
159 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " 157 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
160 "slot_device_offset=%x\n", slot->bus, slot->device, 158 "slot_device_offset=%x\n", slot->bus, slot->device,
161 slot->hp_slot, slot->number, ctrl->slot_device_offset); 159 slot->hp_slot, slot->number, ctrl->slot_device_offset);
162 retval = pci_hp_register(slot->hotplug_slot); 160 retval = pci_hp_register(slot->hotplug_slot,
161 ctrl->pci_dev->subordinate, slot->device);
163 if (retval) { 162 if (retval) {
164 err("pci_hp_register failed with error %d\n", retval); 163 err("pci_hp_register failed with error %d\n", retval);
165 if (retval == -EEXIST) 164 if (retval == -EEXIST)
@@ -288,19 +287,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
288 return 0; 287 return 0;
289} 288}
290 289
291static int get_address (struct hotplug_slot *hotplug_slot, u32 *value) 290static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
292{ 291 enum pci_bus_speed *value)
293 struct slot *slot = get_slot(hotplug_slot);
294 struct pci_bus *bus = slot->ctrl->pci_dev->subordinate;
295
296 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
297
298 *value = (pci_domain_nr(bus) << 16) | (slot->bus << 8) | slot->device;
299
300 return 0;
301}
302
303static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
304{ 292{
305 struct slot *slot = get_slot(hotplug_slot); 293 struct slot *slot = get_slot(hotplug_slot);
306 int retval; 294 int retval;
@@ -330,13 +318,14 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
330 318
331static int is_shpc_capable(struct pci_dev *dev) 319static int is_shpc_capable(struct pci_dev *dev)
332{ 320{
333 if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == 321 if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device ==
334 PCI_DEVICE_ID_AMD_GOLAM_7450)) 322 PCI_DEVICE_ID_AMD_GOLAM_7450))
335 return 1; 323 return 1;
336 if (pci_find_capability(dev, PCI_CAP_ID_SHPC)) 324 if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
337 return 1; 325 return 0;
338 326 if (get_hp_hw_control_from_firmware(dev))
339 return 0; 327 return 0;
328 return 1;
340} 329}
341 330
342static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 331static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 7d770b2cd889..7a0bff364cd4 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -1084,7 +1084,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1084 dbg("%s: HPC at b:d:f:irq=0x%x:%x:%x:%x\n", __func__, 1084 dbg("%s: HPC at b:d:f:irq=0x%x:%x:%x:%x\n", __func__,
1085 pdev->bus->number, PCI_SLOT(pdev->devfn), 1085 pdev->bus->number, PCI_SLOT(pdev->devfn),
1086 PCI_FUNC(pdev->devfn), pdev->irq); 1086 PCI_FUNC(pdev->devfn), pdev->irq);
1087 get_hp_hw_control_from_firmware(pdev);
1088 1087
1089 /* 1088 /*
1090 * If this is the first controller to be initialized, 1089 * If this is the first controller to be initialized,
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index bb0642318a95..3f7b81c065d2 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1748,7 +1748,6 @@ int __init init_dmars(void)
1748 deferred_flush = kzalloc(g_num_of_iommus * 1748 deferred_flush = kzalloc(g_num_of_iommus *
1749 sizeof(struct deferred_flush_tables), GFP_KERNEL); 1749 sizeof(struct deferred_flush_tables), GFP_KERNEL);
1750 if (!deferred_flush) { 1750 if (!deferred_flush) {
1751 kfree(g_iommus);
1752 ret = -ENOMEM; 1751 ret = -ENOMEM;
1753 goto error; 1752 goto error;
1754 } 1753 }
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 8c61304cbb37..15af618d36e2 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -70,12 +70,10 @@ arch_teardown_msi_irqs(struct pci_dev *dev)
70 } 70 }
71} 71}
72 72
73static void msi_set_enable(struct pci_dev *dev, int enable) 73static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
74{ 74{
75 int pos;
76 u16 control; 75 u16 control;
77 76
78 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
79 if (pos) { 77 if (pos) {
80 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 78 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
81 control &= ~PCI_MSI_FLAGS_ENABLE; 79 control &= ~PCI_MSI_FLAGS_ENABLE;
@@ -85,6 +83,11 @@ static void msi_set_enable(struct pci_dev *dev, int enable)
85 } 83 }
86} 84}
87 85
86static void msi_set_enable(struct pci_dev *dev, int enable)
87{
88 __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
89}
90
88static void msix_set_enable(struct pci_dev *dev, int enable) 91static void msix_set_enable(struct pci_dev *dev, int enable)
89{ 92{
90 int pos; 93 int pos;
@@ -141,7 +144,8 @@ static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
141 mask_bits |= flag & mask; 144 mask_bits |= flag & mask;
142 pci_write_config_dword(entry->dev, pos, mask_bits); 145 pci_write_config_dword(entry->dev, pos, mask_bits);
143 } else { 146 } else {
144 msi_set_enable(entry->dev, !flag); 147 __msi_set_enable(entry->dev, entry->msi_attrib.pos,
148 !flag);
145 } 149 }
146 break; 150 break;
147 case PCI_CAP_ID_MSIX: 151 case PCI_CAP_ID_MSIX:
@@ -561,9 +565,8 @@ int pci_enable_msi(struct pci_dev* dev)
561 565
562 /* Check whether driver already requested for MSI-X irqs */ 566 /* Check whether driver already requested for MSI-X irqs */
563 if (dev->msix_enabled) { 567 if (dev->msix_enabled) {
564 printk(KERN_INFO "PCI: %s: Can't enable MSI. " 568 dev_info(&dev->dev, "can't enable MSI "
565 "Device already has MSI-X enabled\n", 569 "(MSI-X already enabled)\n");
566 pci_name(dev));
567 return -EINVAL; 570 return -EINVAL;
568 } 571 }
569 status = msi_capability_init(dev); 572 status = msi_capability_init(dev);
@@ -686,9 +689,8 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
686 689
687 /* Check whether driver already requested for MSI irq */ 690 /* Check whether driver already requested for MSI irq */
688 if (dev->msi_enabled) { 691 if (dev->msi_enabled) {
689 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. " 692 dev_info(&dev->dev, "can't enable MSI-X "
690 "Device already has an MSI irq assigned\n", 693 "(MSI IRQ already assigned)\n");
691 pci_name(dev));
692 return -EINVAL; 694 return -EINVAL;
693 } 695 }
694 status = msix_capability_init(dev, entries, nvec); 696 status = msix_capability_init(dev, entries, nvec);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 9d6fc8e6285d..7764768b6a0e 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -21,12 +21,19 @@
21 21
22struct acpi_osc_data { 22struct acpi_osc_data {
23 acpi_handle handle; 23 acpi_handle handle;
24 u32 ctrlset_buf[3]; 24 u32 support_set;
25 u32 global_ctrlsets; 25 u32 control_set;
26 int is_queried;
27 u32 query_result;
26 struct list_head sibiling; 28 struct list_head sibiling;
27}; 29};
28static LIST_HEAD(acpi_osc_data_list); 30static LIST_HEAD(acpi_osc_data_list);
29 31
32struct acpi_osc_args {
33 u32 capbuf[3];
34 u32 query_result;
35};
36
30static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle) 37static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
31{ 38{
32 struct acpi_osc_data *data; 39 struct acpi_osc_data *data;
@@ -44,42 +51,18 @@ static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
44 return data; 51 return data;
45} 52}
46 53
47static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; 54static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40,
55 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
48 56
49static acpi_status 57static acpi_status acpi_run_osc(acpi_handle handle,
50acpi_query_osc ( 58 struct acpi_osc_args *osc_args)
51 acpi_handle handle,
52 u32 level,
53 void *context,
54 void **retval )
55{ 59{
56 acpi_status status; 60 acpi_status status;
57 struct acpi_object_list input; 61 struct acpi_object_list input;
58 union acpi_object in_params[4]; 62 union acpi_object in_params[4];
59 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; 63 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
60 union acpi_object *out_obj; 64 union acpi_object *out_obj;
61 u32 osc_dw0; 65 u32 osc_dw0, flags = osc_args->capbuf[OSC_QUERY_TYPE];
62 acpi_status *ret_status = (acpi_status *)retval;
63 struct acpi_osc_data *osc_data;
64 u32 flags = (unsigned long)context, temp;
65 acpi_handle tmp;
66
67 status = acpi_get_handle(handle, "_OSC", &tmp);
68 if (ACPI_FAILURE(status))
69 return status;
70
71 osc_data = acpi_get_osc_data(handle);
72 if (!osc_data) {
73 printk(KERN_ERR "acpi osc data array is full\n");
74 return AE_ERROR;
75 }
76
77 osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] |= (flags & OSC_SUPPORT_MASKS);
78
79 /* do _OSC query for all possible controls */
80 temp = osc_data->ctrlset_buf[OSC_CONTROL_TYPE];
81 osc_data->ctrlset_buf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
82 osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
83 66
84 /* Setting up input parameters */ 67 /* Setting up input parameters */
85 input.count = 4; 68 input.count = 4;
@@ -93,20 +76,19 @@ acpi_query_osc (
93 in_params[2].integer.value = 3; 76 in_params[2].integer.value = 3;
94 in_params[3].type = ACPI_TYPE_BUFFER; 77 in_params[3].type = ACPI_TYPE_BUFFER;
95 in_params[3].buffer.length = 12; 78 in_params[3].buffer.length = 12;
96 in_params[3].buffer.pointer = (u8 *)osc_data->ctrlset_buf; 79 in_params[3].buffer.pointer = (u8 *)osc_args->capbuf;
97 80
98 status = acpi_evaluate_object(handle, "_OSC", &input, &output); 81 status = acpi_evaluate_object(handle, "_OSC", &input, &output);
99 if (ACPI_FAILURE(status)) 82 if (ACPI_FAILURE(status))
100 goto out_nofree; 83 return status;
101 out_obj = output.pointer;
102 84
85 out_obj = output.pointer;
103 if (out_obj->type != ACPI_TYPE_BUFFER) { 86 if (out_obj->type != ACPI_TYPE_BUFFER) {
104 printk(KERN_DEBUG 87 printk(KERN_DEBUG "Evaluate _OSC returns wrong type\n");
105 "Evaluate _OSC returns wrong type\n");
106 status = AE_TYPE; 88 status = AE_TYPE;
107 goto query_osc_out; 89 goto out_kfree;
108 } 90 }
109 osc_dw0 = *((u32 *) out_obj->buffer.pointer); 91 osc_dw0 = *((u32 *)out_obj->buffer.pointer);
110 if (osc_dw0) { 92 if (osc_dw0) {
111 if (osc_dw0 & OSC_REQUEST_ERROR) 93 if (osc_dw0 & OSC_REQUEST_ERROR)
112 printk(KERN_DEBUG "_OSC request fails\n"); 94 printk(KERN_DEBUG "_OSC request fails\n");
@@ -115,93 +97,58 @@ acpi_query_osc (
115 if (osc_dw0 & OSC_INVALID_REVISION_ERROR) 97 if (osc_dw0 & OSC_INVALID_REVISION_ERROR)
116 printk(KERN_DEBUG "_OSC invalid revision\n"); 98 printk(KERN_DEBUG "_OSC invalid revision\n");
117 if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { 99 if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) {
118 /* Update Global Control Set */ 100 if (flags & OSC_QUERY_ENABLE)
119 osc_data->global_ctrlsets = 101 goto out_success;
120 *((u32 *)(out_obj->buffer.pointer + 8)); 102 printk(KERN_DEBUG "_OSC FW not grant req. control\n");
121 status = AE_OK; 103 status = AE_SUPPORT;
122 goto query_osc_out; 104 goto out_kfree;
123 } 105 }
124 status = AE_ERROR; 106 status = AE_ERROR;
125 goto query_osc_out; 107 goto out_kfree;
126 } 108 }
127 109out_success:
128 /* Update Global Control Set */ 110 if (flags & OSC_QUERY_ENABLE)
129 osc_data->global_ctrlsets = *((u32 *)(out_obj->buffer.pointer + 8)); 111 osc_args->query_result =
112 *((u32 *)(out_obj->buffer.pointer + 8));
130 status = AE_OK; 113 status = AE_OK;
131 114
132query_osc_out: 115out_kfree:
133 kfree(output.pointer); 116 kfree(output.pointer);
134out_nofree:
135 *ret_status = status;
136
137 osc_data->ctrlset_buf[OSC_QUERY_TYPE] = !OSC_QUERY_ENABLE;
138 osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = temp;
139 if (ACPI_FAILURE(status)) {
140 /* no osc support at all */
141 osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] = 0;
142 }
143
144 return status; 117 return status;
145} 118}
146 119
147 120static acpi_status acpi_query_osc(acpi_handle handle,
148static acpi_status 121 u32 level, void *context, void **retval)
149acpi_run_osc (
150 acpi_handle handle,
151 void *context)
152{ 122{
153 acpi_status status; 123 acpi_status status;
154 struct acpi_object_list input; 124 struct acpi_osc_data *osc_data;
155 union acpi_object in_params[4]; 125 u32 flags = (unsigned long)context, support_set;
156 struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; 126 acpi_handle tmp;
157 union acpi_object *out_obj; 127 struct acpi_osc_args osc_args;
158 u32 osc_dw0;
159
160 /* Setting up input parameters */
161 input.count = 4;
162 input.pointer = in_params;
163 in_params[0].type = ACPI_TYPE_BUFFER;
164 in_params[0].buffer.length = 16;
165 in_params[0].buffer.pointer = OSC_UUID;
166 in_params[1].type = ACPI_TYPE_INTEGER;
167 in_params[1].integer.value = 1;
168 in_params[2].type = ACPI_TYPE_INTEGER;
169 in_params[2].integer.value = 3;
170 in_params[3].type = ACPI_TYPE_BUFFER;
171 in_params[3].buffer.length = 12;
172 in_params[3].buffer.pointer = (u8 *)context;
173 128
174 status = acpi_evaluate_object(handle, "_OSC", &input, &output); 129 status = acpi_get_handle(handle, "_OSC", &tmp);
175 if (ACPI_FAILURE (status)) 130 if (ACPI_FAILURE(status))
176 return status; 131 return status;
177 132
178 out_obj = output.pointer; 133 osc_data = acpi_get_osc_data(handle);
179 if (out_obj->type != ACPI_TYPE_BUFFER) { 134 if (!osc_data) {
180 printk(KERN_DEBUG 135 printk(KERN_ERR "acpi osc data array is full\n");
181 "Evaluate _OSC returns wrong type\n"); 136 return AE_ERROR;
182 status = AE_TYPE;
183 goto run_osc_out;
184 } 137 }
185 osc_dw0 = *((u32 *) out_obj->buffer.pointer); 138
186 if (osc_dw0) { 139 /* do _OSC query for all possible controls */
187 if (osc_dw0 & OSC_REQUEST_ERROR) 140 support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS);
188 printk(KERN_DEBUG "_OSC request fails\n"); 141 osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
189 if (osc_dw0 & OSC_INVALID_UUID_ERROR) 142 osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
190 printk(KERN_DEBUG "_OSC invalid UUID\n"); 143 osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
191 if (osc_dw0 & OSC_INVALID_REVISION_ERROR) 144
192 printk(KERN_DEBUG "_OSC invalid revision\n"); 145 status = acpi_run_osc(handle, &osc_args);
193 if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { 146 if (ACPI_SUCCESS(status)) {
194 printk(KERN_DEBUG "_OSC FW not grant req. control\n"); 147 osc_data->support_set = support_set;
195 status = AE_SUPPORT; 148 osc_data->query_result = osc_args.query_result;
196 goto run_osc_out; 149 osc_data->is_queried = 1;
197 }
198 status = AE_ERROR;
199 goto run_osc_out;
200 } 150 }
201 status = AE_OK;
202 151
203run_osc_out:
204 kfree(output.pointer);
205 return status; 152 return status;
206} 153}
207 154
@@ -215,15 +162,11 @@ run_osc_out:
215 **/ 162 **/
216acpi_status __pci_osc_support_set(u32 flags, const char *hid) 163acpi_status __pci_osc_support_set(u32 flags, const char *hid)
217{ 164{
218 acpi_status retval = AE_NOT_FOUND; 165 if (!(flags & OSC_SUPPORT_MASKS))
219
220 if (!(flags & OSC_SUPPORT_MASKS)) {
221 return AE_TYPE; 166 return AE_TYPE;
222 } 167
223 acpi_get_devices(hid, 168 acpi_get_devices(hid, acpi_query_osc,
224 acpi_query_osc, 169 (void *)(unsigned long)flags, NULL);
225 (void *)(unsigned long)flags,
226 (void **) &retval );
227 return AE_OK; 170 return AE_OK;
228} 171}
229 172
@@ -236,10 +179,11 @@ acpi_status __pci_osc_support_set(u32 flags, const char *hid)
236 **/ 179 **/
237acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) 180acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
238{ 181{
239 acpi_status status; 182 acpi_status status;
240 u32 ctrlset; 183 u32 ctrlset, control_set;
241 acpi_handle tmp; 184 acpi_handle tmp;
242 struct acpi_osc_data *osc_data; 185 struct acpi_osc_data *osc_data;
186 struct acpi_osc_args osc_args;
243 187
244 status = acpi_get_handle(handle, "_OSC", &tmp); 188 status = acpi_get_handle(handle, "_OSC", &tmp);
245 if (ACPI_FAILURE(status)) 189 if (ACPI_FAILURE(status))
@@ -252,24 +196,25 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
252 } 196 }
253 197
254 ctrlset = (flags & OSC_CONTROL_MASKS); 198 ctrlset = (flags & OSC_CONTROL_MASKS);
255 if (!ctrlset) { 199 if (!ctrlset)
256 return AE_TYPE; 200 return AE_TYPE;
257 } 201
258 if (osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] && 202 if (osc_data->is_queried &&
259 ((osc_data->global_ctrlsets & ctrlset) != ctrlset)) { 203 ((osc_data->query_result & ctrlset) != ctrlset))
260 return AE_SUPPORT; 204 return AE_SUPPORT;
261 } 205
262 osc_data->ctrlset_buf[OSC_CONTROL_TYPE] |= ctrlset; 206 control_set = osc_data->control_set | ctrlset;
263 status = acpi_run_osc(handle, osc_data->ctrlset_buf); 207 osc_args.capbuf[OSC_QUERY_TYPE] = 0;
264 if (ACPI_FAILURE (status)) { 208 osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set;
265 osc_data->ctrlset_buf[OSC_CONTROL_TYPE] &= ~ctrlset; 209 osc_args.capbuf[OSC_CONTROL_TYPE] = control_set;
266 } 210 status = acpi_run_osc(handle, &osc_args);
267 211 if (ACPI_SUCCESS(status))
212 osc_data->control_set = control_set;
213
268 return status; 214 return status;
269} 215}
270EXPORT_SYMBOL(pci_osc_control_set); 216EXPORT_SYMBOL(pci_osc_control_set);
271 217
272#ifdef CONFIG_ACPI_SLEEP
273/* 218/*
274 * _SxD returns the D-state with the highest power 219 * _SxD returns the D-state with the highest power
275 * (lowest D-state number) supported in the S-state "x". 220 * (lowest D-state number) supported in the S-state "x".
@@ -293,13 +238,11 @@ EXPORT_SYMBOL(pci_osc_control_set);
293 * choose highest power _SxD or any lower power 238 * choose highest power _SxD or any lower power
294 */ 239 */
295 240
296static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev, 241static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
297 pm_message_t state)
298{ 242{
299 int acpi_state; 243 int acpi_state;
300 244
301 acpi_state = acpi_pm_device_sleep_state(&pdev->dev, 245 acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL);
302 device_may_wakeup(&pdev->dev), NULL);
303 if (acpi_state < 0) 246 if (acpi_state < 0)
304 return PCI_POWER_ERROR; 247 return PCI_POWER_ERROR;
305 248
@@ -315,7 +258,13 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev,
315 } 258 }
316 return PCI_POWER_ERROR; 259 return PCI_POWER_ERROR;
317} 260}
318#endif 261
262static bool acpi_pci_power_manageable(struct pci_dev *dev)
263{
264 acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
265
266 return handle ? acpi_bus_power_manageable(handle) : false;
267}
319 268
320static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) 269static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
321{ 270{
@@ -328,12 +277,11 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
328 [PCI_D3hot] = ACPI_STATE_D3, 277 [PCI_D3hot] = ACPI_STATE_D3,
329 [PCI_D3cold] = ACPI_STATE_D3 278 [PCI_D3cold] = ACPI_STATE_D3
330 }; 279 };
280 int error = -EINVAL;
331 281
332 if (!handle)
333 return -ENODEV;
334 /* If the ACPI device has _EJ0, ignore the device */ 282 /* If the ACPI device has _EJ0, ignore the device */
335 if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) 283 if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
336 return 0; 284 return -ENODEV;
337 285
338 switch (state) { 286 switch (state) {
339 case PCI_D0: 287 case PCI_D0:
@@ -341,11 +289,41 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
341 case PCI_D2: 289 case PCI_D2:
342 case PCI_D3hot: 290 case PCI_D3hot:
343 case PCI_D3cold: 291 case PCI_D3cold:
344 return acpi_bus_set_power(handle, state_conv[state]); 292 error = acpi_bus_set_power(handle, state_conv[state]);
345 } 293 }
346 return -EINVAL; 294
295 if (!error)
296 dev_printk(KERN_INFO, &dev->dev,
297 "power state changed by ACPI to D%d\n", state);
298
299 return error;
300}
301
302static bool acpi_pci_can_wakeup(struct pci_dev *dev)
303{
304 acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
305
306 return handle ? acpi_bus_can_wakeup(handle) : false;
307}
308
309static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
310{
311 int error = acpi_pm_device_sleep_wake(&dev->dev, enable);
312
313 if (!error)
314 dev_printk(KERN_INFO, &dev->dev,
315 "wake-up capability %s by ACPI\n",
316 enable ? "enabled" : "disabled");
317 return error;
347} 318}
348 319
320static struct pci_platform_pm_ops acpi_pci_platform_pm = {
321 .is_manageable = acpi_pci_power_manageable,
322 .set_state = acpi_pci_set_power_state,
323 .choose_state = acpi_pci_choose_state,
324 .can_wakeup = acpi_pci_can_wakeup,
325 .sleep_wake = acpi_pci_sleep_wake,
326};
349 327
350/* ACPI bus type */ 328/* ACPI bus type */
351static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) 329static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
@@ -397,10 +375,7 @@ static int __init acpi_pci_init(void)
397 ret = register_acpi_bus_type(&acpi_pci_bus); 375 ret = register_acpi_bus_type(&acpi_pci_bus);
398 if (ret) 376 if (ret)
399 return 0; 377 return 0;
400#ifdef CONFIG_ACPI_SLEEP 378 pci_set_platform_pm(&acpi_pci_platform_pm);
401 platform_pci_choose_state = acpi_pci_choose_state;
402#endif
403 platform_pci_set_power_state = acpi_pci_set_power_state;
404 return 0; 379 return 0;
405} 380}
406arch_initcall(acpi_pci_init); 381arch_initcall(acpi_pci_init);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index e1637bd82b8e..a13f53486114 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -274,7 +274,57 @@ static int pci_device_remove(struct device * dev)
274 return 0; 274 return 0;
275} 275}
276 276
277static int pci_device_suspend(struct device * dev, pm_message_t state) 277static void pci_device_shutdown(struct device *dev)
278{
279 struct pci_dev *pci_dev = to_pci_dev(dev);
280 struct pci_driver *drv = pci_dev->driver;
281
282 if (drv && drv->shutdown)
283 drv->shutdown(pci_dev);
284 pci_msi_shutdown(pci_dev);
285 pci_msix_shutdown(pci_dev);
286}
287
288#ifdef CONFIG_PM_SLEEP
289
290/*
291 * Default "suspend" method for devices that have no driver provided suspend,
292 * or not even a driver at all.
293 */
294static void pci_default_pm_suspend(struct pci_dev *pci_dev)
295{
296 pci_save_state(pci_dev);
297 /*
298 * mark its power state as "unknown", since we don't know if
299 * e.g. the BIOS will change its device state when we suspend.
300 */
301 if (pci_dev->current_state == PCI_D0)
302 pci_dev->current_state = PCI_UNKNOWN;
303}
304
305/*
306 * Default "resume" method for devices that have no driver provided resume,
307 * or not even a driver at all.
308 */
309static int pci_default_pm_resume(struct pci_dev *pci_dev)
310{
311 int retval = 0;
312
313 /* restore the PCI config space */
314 pci_restore_state(pci_dev);
315 /* if the device was enabled before suspend, reenable */
316 retval = pci_reenable_device(pci_dev);
317 /*
318 * if the device was busmaster before the suspend, make it busmaster
319 * again
320 */
321 if (pci_dev->is_busmaster)
322 pci_set_master(pci_dev);
323
324 return retval;
325}
326
327static int pci_legacy_suspend(struct device *dev, pm_message_t state)
278{ 328{
279 struct pci_dev * pci_dev = to_pci_dev(dev); 329 struct pci_dev * pci_dev = to_pci_dev(dev);
280 struct pci_driver * drv = pci_dev->driver; 330 struct pci_driver * drv = pci_dev->driver;
@@ -284,18 +334,12 @@ static int pci_device_suspend(struct device * dev, pm_message_t state)
284 i = drv->suspend(pci_dev, state); 334 i = drv->suspend(pci_dev, state);
285 suspend_report_result(drv->suspend, i); 335 suspend_report_result(drv->suspend, i);
286 } else { 336 } else {
287 pci_save_state(pci_dev); 337 pci_default_pm_suspend(pci_dev);
288 /*
289 * mark its power state as "unknown", since we don't know if
290 * e.g. the BIOS will change its device state when we suspend.
291 */
292 if (pci_dev->current_state == PCI_D0)
293 pci_dev->current_state = PCI_UNKNOWN;
294 } 338 }
295 return i; 339 return i;
296} 340}
297 341
298static int pci_device_suspend_late(struct device * dev, pm_message_t state) 342static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
299{ 343{
300 struct pci_dev * pci_dev = to_pci_dev(dev); 344 struct pci_dev * pci_dev = to_pci_dev(dev);
301 struct pci_driver * drv = pci_dev->driver; 345 struct pci_driver * drv = pci_dev->driver;
@@ -308,26 +352,7 @@ static int pci_device_suspend_late(struct device * dev, pm_message_t state)
308 return i; 352 return i;
309} 353}
310 354
311/* 355static int pci_legacy_resume(struct device *dev)
312 * Default resume method for devices that have no driver provided resume,
313 * or not even a driver at all.
314 */
315static int pci_default_resume(struct pci_dev *pci_dev)
316{
317 int retval = 0;
318
319 /* restore the PCI config space */
320 pci_restore_state(pci_dev);
321 /* if the device was enabled before suspend, reenable */
322 retval = pci_reenable_device(pci_dev);
323 /* if the device was busmaster before the suspend, make it busmaster again */
324 if (pci_dev->is_busmaster)
325 pci_set_master(pci_dev);
326
327 return retval;
328}
329
330static int pci_device_resume(struct device * dev)
331{ 356{
332 int error; 357 int error;
333 struct pci_dev * pci_dev = to_pci_dev(dev); 358 struct pci_dev * pci_dev = to_pci_dev(dev);
@@ -336,34 +361,313 @@ static int pci_device_resume(struct device * dev)
336 if (drv && drv->resume) 361 if (drv && drv->resume)
337 error = drv->resume(pci_dev); 362 error = drv->resume(pci_dev);
338 else 363 else
339 error = pci_default_resume(pci_dev); 364 error = pci_default_pm_resume(pci_dev);
340 return error; 365 return error;
341} 366}
342 367
343static int pci_device_resume_early(struct device * dev) 368static int pci_legacy_resume_early(struct device *dev)
344{ 369{
345 int error = 0; 370 int error = 0;
346 struct pci_dev * pci_dev = to_pci_dev(dev); 371 struct pci_dev * pci_dev = to_pci_dev(dev);
347 struct pci_driver * drv = pci_dev->driver; 372 struct pci_driver * drv = pci_dev->driver;
348 373
349 pci_fixup_device(pci_fixup_resume, pci_dev);
350
351 if (drv && drv->resume_early) 374 if (drv && drv->resume_early)
352 error = drv->resume_early(pci_dev); 375 error = drv->resume_early(pci_dev);
353 return error; 376 return error;
354} 377}
355 378
356static void pci_device_shutdown(struct device *dev) 379static int pci_pm_prepare(struct device *dev)
380{
381 struct device_driver *drv = dev->driver;
382 int error = 0;
383
384 if (drv && drv->pm && drv->pm->prepare)
385 error = drv->pm->prepare(dev);
386
387 return error;
388}
389
390static void pci_pm_complete(struct device *dev)
391{
392 struct device_driver *drv = dev->driver;
393
394 if (drv && drv->pm && drv->pm->complete)
395 drv->pm->complete(dev);
396}
397
398#ifdef CONFIG_SUSPEND
399
400static int pci_pm_suspend(struct device *dev)
401{
402 struct pci_dev *pci_dev = to_pci_dev(dev);
403 struct device_driver *drv = dev->driver;
404 int error = 0;
405
406 if (drv && drv->pm) {
407 if (drv->pm->suspend) {
408 error = drv->pm->suspend(dev);
409 suspend_report_result(drv->pm->suspend, error);
410 } else {
411 pci_default_pm_suspend(pci_dev);
412 }
413 } else {
414 error = pci_legacy_suspend(dev, PMSG_SUSPEND);
415 }
416 pci_fixup_device(pci_fixup_suspend, pci_dev);
417
418 return error;
419}
420
421static int pci_pm_suspend_noirq(struct device *dev)
357{ 422{
358 struct pci_dev *pci_dev = to_pci_dev(dev); 423 struct pci_dev *pci_dev = to_pci_dev(dev);
359 struct pci_driver *drv = pci_dev->driver; 424 struct pci_driver *drv = pci_dev->driver;
425 int error = 0;
360 426
361 if (drv && drv->shutdown) 427 if (drv && drv->pm) {
362 drv->shutdown(pci_dev); 428 if (drv->pm->suspend_noirq) {
363 pci_msi_shutdown(pci_dev); 429 error = drv->pm->suspend_noirq(dev);
364 pci_msix_shutdown(pci_dev); 430 suspend_report_result(drv->pm->suspend_noirq, error);
431 }
432 } else {
433 error = pci_legacy_suspend_late(dev, PMSG_SUSPEND);
434 }
435
436 return error;
365} 437}
366 438
439static int pci_pm_resume(struct device *dev)
440{
441 struct pci_dev *pci_dev = to_pci_dev(dev);
442 struct device_driver *drv = dev->driver;
443 int error;
444
445 pci_fixup_device(pci_fixup_resume, pci_dev);
446
447 if (drv && drv->pm) {
448 error = drv->pm->resume ? drv->pm->resume(dev) :
449 pci_default_pm_resume(pci_dev);
450 } else {
451 error = pci_legacy_resume(dev);
452 }
453
454 return error;
455}
456
457static int pci_pm_resume_noirq(struct device *dev)
458{
459 struct pci_dev *pci_dev = to_pci_dev(dev);
460 struct pci_driver *drv = pci_dev->driver;
461 int error = 0;
462
463 pci_fixup_device(pci_fixup_resume_early, pci_dev);
464
465 if (drv && drv->pm) {
466 if (drv->pm->resume_noirq)
467 error = drv->pm->resume_noirq(dev);
468 } else {
469 error = pci_legacy_resume_early(dev);
470 }
471
472 return error;
473}
474
475#else /* !CONFIG_SUSPEND */
476
477#define pci_pm_suspend NULL
478#define pci_pm_suspend_noirq NULL
479#define pci_pm_resume NULL
480#define pci_pm_resume_noirq NULL
481
482#endif /* !CONFIG_SUSPEND */
483
484#ifdef CONFIG_HIBERNATION
485
486static int pci_pm_freeze(struct device *dev)
487{
488 struct pci_dev *pci_dev = to_pci_dev(dev);
489 struct device_driver *drv = dev->driver;
490 int error = 0;
491
492 if (drv && drv->pm) {
493 if (drv->pm->freeze) {
494 error = drv->pm->freeze(dev);
495 suspend_report_result(drv->pm->freeze, error);
496 } else {
497 pci_default_pm_suspend(pci_dev);
498 }
499 } else {
500 error = pci_legacy_suspend(dev, PMSG_FREEZE);
501 pci_fixup_device(pci_fixup_suspend, pci_dev);
502 }
503
504 return error;
505}
506
507static int pci_pm_freeze_noirq(struct device *dev)
508{
509 struct pci_dev *pci_dev = to_pci_dev(dev);
510 struct pci_driver *drv = pci_dev->driver;
511 int error = 0;
512
513 if (drv && drv->pm) {
514 if (drv->pm->freeze_noirq) {
515 error = drv->pm->freeze_noirq(dev);
516 suspend_report_result(drv->pm->freeze_noirq, error);
517 }
518 } else {
519 error = pci_legacy_suspend_late(dev, PMSG_FREEZE);
520 }
521
522 return error;
523}
524
525static int pci_pm_thaw(struct device *dev)
526{
527 struct device_driver *drv = dev->driver;
528 int error = 0;
529
530 if (drv && drv->pm) {
531 if (drv->pm->thaw)
532 error = drv->pm->thaw(dev);
533 } else {
534 pci_fixup_device(pci_fixup_resume, to_pci_dev(dev));
535 error = pci_legacy_resume(dev);
536 }
537
538 return error;
539}
540
541static int pci_pm_thaw_noirq(struct device *dev)
542{
543 struct pci_dev *pci_dev = to_pci_dev(dev);
544 struct pci_driver *drv = pci_dev->driver;
545 int error = 0;
546
547 if (drv && drv->pm) {
548 if (drv->pm->thaw_noirq)
549 error = drv->pm->thaw_noirq(dev);
550 } else {
551 pci_fixup_device(pci_fixup_resume_early, pci_dev);
552 error = pci_legacy_resume_early(dev);
553 }
554
555 return error;
556}
557
558static int pci_pm_poweroff(struct device *dev)
559{
560 struct device_driver *drv = dev->driver;
561 int error = 0;
562
563 pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
564
565 if (drv && drv->pm) {
566 if (drv->pm->poweroff) {
567 error = drv->pm->poweroff(dev);
568 suspend_report_result(drv->pm->poweroff, error);
569 }
570 } else {
571 error = pci_legacy_suspend(dev, PMSG_HIBERNATE);
572 }
573
574 return error;
575}
576
577static int pci_pm_poweroff_noirq(struct device *dev)
578{
579 struct pci_dev *pci_dev = to_pci_dev(dev);
580 struct pci_driver *drv = pci_dev->driver;
581 int error = 0;
582
583 if (drv && drv->pm) {
584 if (drv->pm->poweroff_noirq) {
585 error = drv->pm->poweroff_noirq(dev);
586 suspend_report_result(drv->pm->poweroff_noirq, error);
587 }
588 } else {
589 error = pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
590 }
591
592 return error;
593}
594
595static int pci_pm_restore(struct device *dev)
596{
597 struct pci_dev *pci_dev = to_pci_dev(dev);
598 struct device_driver *drv = dev->driver;
599 int error;
600
601 if (drv && drv->pm) {
602 error = drv->pm->restore ? drv->pm->restore(dev) :
603 pci_default_pm_resume(pci_dev);
604 } else {
605 error = pci_legacy_resume(dev);
606 }
607 pci_fixup_device(pci_fixup_resume, pci_dev);
608
609 return error;
610}
611
612static int pci_pm_restore_noirq(struct device *dev)
613{
614 struct pci_dev *pci_dev = to_pci_dev(dev);
615 struct pci_driver *drv = pci_dev->driver;
616 int error = 0;
617
618 pci_fixup_device(pci_fixup_resume, pci_dev);
619
620 if (drv && drv->pm) {
621 if (drv->pm->restore_noirq)
622 error = drv->pm->restore_noirq(dev);
623 } else {
624 error = pci_legacy_resume_early(dev);
625 }
626 pci_fixup_device(pci_fixup_resume_early, pci_dev);
627
628 return error;
629}
630
631#else /* !CONFIG_HIBERNATION */
632
633#define pci_pm_freeze NULL
634#define pci_pm_freeze_noirq NULL
635#define pci_pm_thaw NULL
636#define pci_pm_thaw_noirq NULL
637#define pci_pm_poweroff NULL
638#define pci_pm_poweroff_noirq NULL
639#define pci_pm_restore NULL
640#define pci_pm_restore_noirq NULL
641
642#endif /* !CONFIG_HIBERNATION */
643
644struct pm_ext_ops pci_pm_ops = {
645 .base = {
646 .prepare = pci_pm_prepare,
647 .complete = pci_pm_complete,
648 .suspend = pci_pm_suspend,
649 .resume = pci_pm_resume,
650 .freeze = pci_pm_freeze,
651 .thaw = pci_pm_thaw,
652 .poweroff = pci_pm_poweroff,
653 .restore = pci_pm_restore,
654 },
655 .suspend_noirq = pci_pm_suspend_noirq,
656 .resume_noirq = pci_pm_resume_noirq,
657 .freeze_noirq = pci_pm_freeze_noirq,
658 .thaw_noirq = pci_pm_thaw_noirq,
659 .poweroff_noirq = pci_pm_poweroff_noirq,
660 .restore_noirq = pci_pm_restore_noirq,
661};
662
663#define PCI_PM_OPS_PTR &pci_pm_ops
664
665#else /* !CONFIG_PM_SLEEP */
666
667#define PCI_PM_OPS_PTR NULL
668
669#endif /* !CONFIG_PM_SLEEP */
670
367/** 671/**
368 * __pci_register_driver - register a new pci driver 672 * __pci_register_driver - register a new pci driver
369 * @drv: the driver structure to register 673 * @drv: the driver structure to register
@@ -386,6 +690,9 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
386 drv->driver.owner = owner; 690 drv->driver.owner = owner;
387 drv->driver.mod_name = mod_name; 691 drv->driver.mod_name = mod_name;
388 692
693 if (drv->pm)
694 drv->driver.pm = &drv->pm->base;
695
389 spin_lock_init(&drv->dynids.lock); 696 spin_lock_init(&drv->dynids.lock);
390 INIT_LIST_HEAD(&drv->dynids.list); 697 INIT_LIST_HEAD(&drv->dynids.list);
391 698
@@ -511,12 +818,9 @@ struct bus_type pci_bus_type = {
511 .uevent = pci_uevent, 818 .uevent = pci_uevent,
512 .probe = pci_device_probe, 819 .probe = pci_device_probe,
513 .remove = pci_device_remove, 820 .remove = pci_device_remove,
514 .suspend = pci_device_suspend,
515 .suspend_late = pci_device_suspend_late,
516 .resume_early = pci_device_resume_early,
517 .resume = pci_device_resume,
518 .shutdown = pci_device_shutdown, 821 .shutdown = pci_device_shutdown,
519 .dev_attrs = pci_dev_attrs, 822 .dev_attrs = pci_dev_attrs,
823 .pm = PCI_PM_OPS_PTR,
520}; 824};
521 825
522static int __init pci_driver_init(void) 826static int __init pci_driver_init(void)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e4548ab2a93c..44a46c92b721 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
3 *
4 * PCI Bus Services, see include/linux/pci.h for further explanation. 2 * PCI Bus Services, see include/linux/pci.h for further explanation.
5 * 3 *
6 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, 4 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
@@ -19,6 +17,7 @@
19#include <linux/string.h> 17#include <linux/string.h>
20#include <linux/log2.h> 18#include <linux/log2.h>
21#include <linux/pci-aspm.h> 19#include <linux/pci-aspm.h>
20#include <linux/pm_wakeup.h>
22#include <asm/dma.h> /* isa_dma_bridge_buggy */ 21#include <asm/dma.h> /* isa_dma_bridge_buggy */
23#include "pci.h" 22#include "pci.h"
24 23
@@ -378,74 +377,90 @@ pci_restore_bars(struct pci_dev *dev)
378 pci_update_resource(dev, &dev->resource[i], i); 377 pci_update_resource(dev, &dev->resource[i], i);
379} 378}
380 379
381int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t); 380static struct pci_platform_pm_ops *pci_platform_pm;
382 381
383/** 382int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
384 * pci_set_power_state - Set the power state of a PCI device
385 * @dev: PCI device to be suspended
386 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
387 *
388 * Transition a device to a new power state, using the Power Management
389 * Capabilities in the device's config space.
390 *
391 * RETURN VALUE:
392 * -EINVAL if trying to enter a lower state than we're already in.
393 * 0 if we're already in the requested state.
394 * -EIO if device does not support PCI PM.
395 * 0 if we can successfully change the power state.
396 */
397int
398pci_set_power_state(struct pci_dev *dev, pci_power_t state)
399{ 383{
400 int pm, need_restore = 0; 384 if (!ops->is_manageable || !ops->set_state || !ops->choose_state
401 u16 pmcsr, pmc; 385 || !ops->sleep_wake || !ops->can_wakeup)
386 return -EINVAL;
387 pci_platform_pm = ops;
388 return 0;
389}
402 390
403 /* bound the state we're entering */ 391static inline bool platform_pci_power_manageable(struct pci_dev *dev)
404 if (state > PCI_D3hot) 392{
405 state = PCI_D3hot; 393 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
394}
406 395
407 /* 396static inline int platform_pci_set_power_state(struct pci_dev *dev,
408 * If the device or the parent bridge can't support PCI PM, ignore 397 pci_power_t t)
409 * the request if we're doing anything besides putting it into D0 398{
410 * (which would only happen on boot). 399 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
411 */ 400}
412 if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
413 return 0;
414 401
415 /* find PCI PM capability in list */ 402static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
416 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 403{
404 return pci_platform_pm ?
405 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
406}
417 407
418 /* abort if the device doesn't support PM capabilities */ 408static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
419 if (!pm) 409{
410 return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
411}
412
413static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
414{
415 return pci_platform_pm ?
416 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
417}
418
419/**
420 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
421 * given PCI device
422 * @dev: PCI device to handle.
423 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
424 *
425 * RETURN VALUE:
426 * -EINVAL if the requested state is invalid.
427 * -EIO if device does not support PCI PM or its PM capabilities register has a
428 * wrong version, or device doesn't support the requested state.
429 * 0 if device already is in the requested state.
430 * 0 if device's power state has been successfully changed.
431 */
432static int
433pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
434{
435 u16 pmcsr;
436 bool need_restore = false;
437
438 if (!dev->pm_cap)
420 return -EIO; 439 return -EIO;
421 440
441 if (state < PCI_D0 || state > PCI_D3hot)
442 return -EINVAL;
443
422 /* Validate current state: 444 /* Validate current state:
423 * Can enter D0 from any state, but if we can only go deeper 445 * Can enter D0 from any state, but if we can only go deeper
424 * to sleep if we're already in a low power state 446 * to sleep if we're already in a low power state
425 */ 447 */
426 if (state != PCI_D0 && dev->current_state > state) { 448 if (dev->current_state == state) {
427 printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n", 449 /* we're already there */
428 __func__, pci_name(dev), state, dev->current_state); 450 return 0;
451 } else if (state != PCI_D0 && dev->current_state <= PCI_D3cold
452 && dev->current_state > state) {
453 dev_err(&dev->dev, "invalid power transition "
454 "(from state %d to %d)\n", dev->current_state, state);
429 return -EINVAL; 455 return -EINVAL;
430 } else if (dev->current_state == state)
431 return 0; /* we're already there */
432
433
434 pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
435 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
436 printk(KERN_DEBUG
437 "PCI: %s has unsupported PM cap regs version (%u)\n",
438 pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
439 return -EIO;
440 } 456 }
441 457
442 /* check if this device supports the desired state */ 458 /* check if this device supports the desired state */
443 if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1)) 459 if ((state == PCI_D1 && !dev->d1_support)
444 return -EIO; 460 || (state == PCI_D2 && !dev->d2_support))
445 else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
446 return -EIO; 461 return -EIO;
447 462
448 pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr); 463 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
449 464
450 /* If we're (effectively) in D3, force entire word to 0. 465 /* If we're (effectively) in D3, force entire word to 0.
451 * This doesn't affect PME_Status, disables PME_En, and 466 * This doesn't affect PME_Status, disables PME_En, and
@@ -461,7 +476,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
461 case PCI_UNKNOWN: /* Boot-up */ 476 case PCI_UNKNOWN: /* Boot-up */
462 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot 477 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
463 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) 478 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
464 need_restore = 1; 479 need_restore = true;
465 /* Fall-through: force to D0 */ 480 /* Fall-through: force to D0 */
466 default: 481 default:
467 pmcsr = 0; 482 pmcsr = 0;
@@ -469,7 +484,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
469 } 484 }
470 485
471 /* enter specified state */ 486 /* enter specified state */
472 pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr); 487 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
473 488
474 /* Mandatory power management transition delays */ 489 /* Mandatory power management transition delays */
475 /* see PCI PM 1.1 5.6.1 table 18 */ 490 /* see PCI PM 1.1 5.6.1 table 18 */
@@ -478,13 +493,6 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
478 else if (state == PCI_D2 || dev->current_state == PCI_D2) 493 else if (state == PCI_D2 || dev->current_state == PCI_D2)
479 udelay(200); 494 udelay(200);
480 495
481 /*
482 * Give firmware a chance to be called, such as ACPI _PRx, _PSx
483 * Firmware method after native method ?
484 */
485 if (platform_pci_set_power_state)
486 platform_pci_set_power_state(dev, state);
487
488 dev->current_state = state; 496 dev->current_state = state;
489 497
490 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT 498 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
@@ -508,8 +516,77 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
508 return 0; 516 return 0;
509} 517}
510 518
511pci_power_t (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state); 519/**
512 520 * pci_update_current_state - Read PCI power state of given device from its
521 * PCI PM registers and cache it
522 * @dev: PCI device to handle.
523 */
524static void pci_update_current_state(struct pci_dev *dev)
525{
526 if (dev->pm_cap) {
527 u16 pmcsr;
528
529 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
530 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
531 }
532}
533
534/**
535 * pci_set_power_state - Set the power state of a PCI device
536 * @dev: PCI device to handle.
537 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
538 *
539 * Transition a device to a new power state, using the platform formware and/or
540 * the device's PCI PM registers.
541 *
542 * RETURN VALUE:
543 * -EINVAL if the requested state is invalid.
544 * -EIO if device does not support PCI PM or its PM capabilities register has a
545 * wrong version, or device doesn't support the requested state.
546 * 0 if device already is in the requested state.
547 * 0 if device's power state has been successfully changed.
548 */
549int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
550{
551 int error;
552
553 /* bound the state we're entering */
554 if (state > PCI_D3hot)
555 state = PCI_D3hot;
556 else if (state < PCI_D0)
557 state = PCI_D0;
558 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
559 /*
560 * If the device or the parent bridge do not support PCI PM,
561 * ignore the request if we're doing anything other than putting
562 * it into D0 (which would only happen on boot).
563 */
564 return 0;
565
566 if (state == PCI_D0 && platform_pci_power_manageable(dev)) {
567 /*
568 * Allow the platform to change the state, for example via ACPI
569 * _PR0, _PS0 and some such, but do not trust it.
570 */
571 int ret = platform_pci_set_power_state(dev, PCI_D0);
572 if (!ret)
573 pci_update_current_state(dev);
574 }
575
576 error = pci_raw_set_power_state(dev, state);
577
578 if (state > PCI_D0 && platform_pci_power_manageable(dev)) {
579 /* Allow the platform to finalize the transition */
580 int ret = platform_pci_set_power_state(dev, state);
581 if (!ret) {
582 pci_update_current_state(dev);
583 error = 0;
584 }
585 }
586
587 return error;
588}
589
513/** 590/**
514 * pci_choose_state - Choose the power state of a PCI device 591 * pci_choose_state - Choose the power state of a PCI device
515 * @dev: PCI device to be suspended 592 * @dev: PCI device to be suspended
@@ -527,11 +604,9 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
527 if (!pci_find_capability(dev, PCI_CAP_ID_PM)) 604 if (!pci_find_capability(dev, PCI_CAP_ID_PM))
528 return PCI_D0; 605 return PCI_D0;
529 606
530 if (platform_pci_choose_state) { 607 ret = platform_pci_choose_state(dev);
531 ret = platform_pci_choose_state(dev, state); 608 if (ret != PCI_POWER_ERROR)
532 if (ret != PCI_POWER_ERROR) 609 return ret;
533 return ret;
534 }
535 610
536 switch (state.event) { 611 switch (state.event) {
537 case PM_EVENT_ON: 612 case PM_EVENT_ON:
@@ -543,7 +618,8 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
543 case PM_EVENT_HIBERNATE: 618 case PM_EVENT_HIBERNATE:
544 return PCI_D3hot; 619 return PCI_D3hot;
545 default: 620 default:
546 printk("Unrecognized suspend event %d\n", state.event); 621 dev_info(&dev->dev, "unrecognized suspend event %d\n",
622 state.event);
547 BUG(); 623 BUG();
548 } 624 }
549 return PCI_D0; 625 return PCI_D0;
@@ -568,7 +644,7 @@ static int pci_save_pcie_state(struct pci_dev *dev)
568 else 644 else
569 found = 1; 645 found = 1;
570 if (!save_state) { 646 if (!save_state) {
571 dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n"); 647 dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n");
572 return -ENOMEM; 648 return -ENOMEM;
573 } 649 }
574 cap = (u16 *)&save_state->data[0]; 650 cap = (u16 *)&save_state->data[0];
@@ -619,7 +695,7 @@ static int pci_save_pcix_state(struct pci_dev *dev)
619 else 695 else
620 found = 1; 696 found = 1;
621 if (!save_state) { 697 if (!save_state) {
622 dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n"); 698 dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n");
623 return -ENOMEM; 699 return -ENOMEM;
624 } 700 }
625 cap = (u16 *)&save_state->data[0]; 701 cap = (u16 *)&save_state->data[0];
@@ -685,10 +761,9 @@ pci_restore_state(struct pci_dev *dev)
685 for (i = 15; i >= 0; i--) { 761 for (i = 15; i >= 0; i--) {
686 pci_read_config_dword(dev, i * 4, &val); 762 pci_read_config_dword(dev, i * 4, &val);
687 if (val != dev->saved_config_space[i]) { 763 if (val != dev->saved_config_space[i]) {
688 printk(KERN_DEBUG "PM: Writing back config space on " 764 dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
689 "device %s at offset %x (was %x, writing %x)\n", 765 "space at offset %#x (was %#x, writing %#x)\n",
690 pci_name(dev), i, 766 i, val, (int)dev->saved_config_space[i]);
691 val, (int)dev->saved_config_space[i]);
692 pci_write_config_dword(dev,i * 4, 767 pci_write_config_dword(dev,i * 4,
693 dev->saved_config_space[i]); 768 dev->saved_config_space[i]);
694 } 769 }
@@ -961,6 +1036,46 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
961} 1036}
962 1037
963/** 1038/**
1039 * pci_pme_capable - check the capability of PCI device to generate PME#
1040 * @dev: PCI device to handle.
1041 * @state: PCI state from which device will issue PME#.
1042 */
1043static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1044{
1045 if (!dev->pm_cap)
1046 return false;
1047
1048 return !!(dev->pme_support & (1 << state));
1049}
1050
1051/**
1052 * pci_pme_active - enable or disable PCI device's PME# function
1053 * @dev: PCI device to handle.
1054 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1055 *
1056 * The caller must verify that the device is capable of generating PME# before
1057 * calling this function with @enable equal to 'true'.
1058 */
1059static void pci_pme_active(struct pci_dev *dev, bool enable)
1060{
1061 u16 pmcsr;
1062
1063 if (!dev->pm_cap)
1064 return;
1065
1066 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1067 /* Clear PME_Status by writing 1 to it and enable PME# */
1068 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1069 if (!enable)
1070 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1071
1072 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1073
1074 dev_printk(KERN_INFO, &dev->dev, "PME# %s\n",
1075 enable ? "enabled" : "disabled");
1076}
1077
1078/**
964 * pci_enable_wake - enable PCI device as wakeup event source 1079 * pci_enable_wake - enable PCI device as wakeup event source
965 * @dev: PCI device affected 1080 * @dev: PCI device affected
966 * @state: PCI state from which device will issue wakeup events 1081 * @state: PCI state from which device will issue wakeup events
@@ -971,66 +1086,173 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
971 * called automatically by this routine. 1086 * called automatically by this routine.
972 * 1087 *
973 * Devices with legacy power management (no standard PCI PM capabilities) 1088 * Devices with legacy power management (no standard PCI PM capabilities)
974 * always require such platform hooks. Depending on the platform, devices 1089 * always require such platform hooks.
975 * supporting the standard PCI PME# signal may require such platform hooks;
976 * they always update bits in config space to allow PME# generation.
977 * 1090 *
978 * -EIO is returned if the device can't ever be a wakeup event source. 1091 * RETURN VALUE:
979 * -EINVAL is returned if the device can't generate wakeup events from 1092 * 0 is returned on success
980 * the specified PCI state. Returns zero if the operation is successful. 1093 * -EINVAL is returned if device is not supposed to wake up the system
1094 * Error code depending on the platform is returned if both the platform and
1095 * the native mechanism fail to enable the generation of wake-up events
981 */ 1096 */
982int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) 1097int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
983{ 1098{
984 int pm; 1099 int error = 0;
985 int status; 1100 bool pme_done = false;
986 u16 value; 1101
987 1102 if (!device_may_wakeup(&dev->dev))
988 /* Note that drivers should verify device_may_wakeup(&dev->dev) 1103 return -EINVAL;
989 * before calling this function. Platform code should report 1104
990 * errors when drivers try to enable wakeup on devices that 1105 /*
991 * can't issue wakeups, or on which wakeups were disabled by 1106 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
992 * userspace updating the /sys/devices.../power/wakeup file. 1107 * Anderson we should be doing PME# wake enable followed by ACPI wake
1108 * enable. To disable wake-up we call the platform first, for symmetry.
993 */ 1109 */
994 1110
995 status = call_platform_enable_wakeup(&dev->dev, enable); 1111 if (!enable && platform_pci_can_wakeup(dev))
1112 error = platform_pci_sleep_wake(dev, false);
996 1113
997 /* find PCI PM capability in list */ 1114 if (!enable || pci_pme_capable(dev, state)) {
998 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 1115 pci_pme_active(dev, enable);
1116 pme_done = true;
1117 }
999 1118
1000 /* If device doesn't support PM Capabilities, but caller wants to 1119 if (enable && platform_pci_can_wakeup(dev))
1001 * disable wake events, it's a NOP. Otherwise fail unless the 1120 error = platform_pci_sleep_wake(dev, true);
1002 * platform hooks handled this legacy device already.
1003 */
1004 if (!pm)
1005 return enable ? status : 0;
1006 1121
1007 /* Check device's ability to generate PME# */ 1122 return pme_done ? 0 : error;
1008 pci_read_config_word(dev,pm+PCI_PM_PMC,&value); 1123}
1009 1124
1010 value &= PCI_PM_CAP_PME_MASK; 1125/**
1011 value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */ 1126 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into
1127 * a sleep state
1128 * @dev: Device to handle.
1129 *
1130 * Choose the power state appropriate for the device depending on whether
1131 * it can wake up the system and/or is power manageable by the platform
1132 * (PCI_D3hot is the default) and put the device into that state.
1133 */
1134int pci_prepare_to_sleep(struct pci_dev *dev)
1135{
1136 pci_power_t target_state = PCI_D3hot;
1137 int error;
1012 1138
1013 /* Check if it can generate PME# from requested state. */ 1139 if (platform_pci_power_manageable(dev)) {
1014 if (!value || !(value & (1 << state))) { 1140 /*
1015 /* if it can't, revert what the platform hook changed, 1141 * Call the platform to choose the target state of the device
1016 * always reporting the base "EINVAL, can't PME#" error 1142 * and enable wake-up from this state if supported.
1017 */ 1143 */
1018 if (enable) 1144 pci_power_t state = platform_pci_choose_state(dev);
1019 call_platform_enable_wakeup(&dev->dev, 0); 1145
1020 return enable ? -EINVAL : 0; 1146 switch (state) {
1147 case PCI_POWER_ERROR:
1148 case PCI_UNKNOWN:
1149 break;
1150 case PCI_D1:
1151 case PCI_D2:
1152 if (pci_no_d1d2(dev))
1153 break;
1154 default:
1155 target_state = state;
1156 }
1157 } else if (device_may_wakeup(&dev->dev)) {
1158 /*
1159 * Find the deepest state from which the device can generate
1160 * wake-up events, make it the target state and enable device
1161 * to generate PME#.
1162 */
1163 if (!dev->pm_cap)
1164 return -EIO;
1165
1166 if (dev->pme_support) {
1167 while (target_state
1168 && !(dev->pme_support & (1 << target_state)))
1169 target_state--;
1170 }
1021 } 1171 }
1022 1172
1023 pci_read_config_word(dev, pm + PCI_PM_CTRL, &value); 1173 pci_enable_wake(dev, target_state, true);
1024 1174
1025 /* Clear PME_Status by writing 1 to it and enable PME# */ 1175 error = pci_set_power_state(dev, target_state);
1026 value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1027 1176
1028 if (!enable) 1177 if (error)
1029 value &= ~PCI_PM_CTRL_PME_ENABLE; 1178 pci_enable_wake(dev, target_state, false);
1030 1179
1031 pci_write_config_word(dev, pm + PCI_PM_CTRL, value); 1180 return error;
1181}
1032 1182
1033 return 0; 1183/**
1184 * pci_back_from_sleep - turn PCI device on during system-wide transition into
1185 * the working state a sleep state
1186 * @dev: Device to handle.
1187 *
1188 * Disable device's sytem wake-up capability and put it into D0.
1189 */
1190int pci_back_from_sleep(struct pci_dev *dev)
1191{
1192 pci_enable_wake(dev, PCI_D0, false);
1193 return pci_set_power_state(dev, PCI_D0);
1194}
1195
1196/**
1197 * pci_pm_init - Initialize PM functions of given PCI device
1198 * @dev: PCI device to handle.
1199 */
1200void pci_pm_init(struct pci_dev *dev)
1201{
1202 int pm;
1203 u16 pmc;
1204
1205 dev->pm_cap = 0;
1206
1207 /* find PCI PM capability in list */
1208 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1209 if (!pm)
1210 return;
1211 /* Check device's ability to generate PME# */
1212 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1213
1214 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1215 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1216 pmc & PCI_PM_CAP_VER_MASK);
1217 return;
1218 }
1219
1220 dev->pm_cap = pm;
1221
1222 dev->d1_support = false;
1223 dev->d2_support = false;
1224 if (!pci_no_d1d2(dev)) {
1225 if (pmc & PCI_PM_CAP_D1) {
1226 dev_printk(KERN_DEBUG, &dev->dev, "supports D1\n");
1227 dev->d1_support = true;
1228 }
1229 if (pmc & PCI_PM_CAP_D2) {
1230 dev_printk(KERN_DEBUG, &dev->dev, "supports D2\n");
1231 dev->d2_support = true;
1232 }
1233 }
1234
1235 pmc &= PCI_PM_CAP_PME_MASK;
1236 if (pmc) {
1237 dev_printk(KERN_INFO, &dev->dev,
1238 "PME# supported from%s%s%s%s%s\n",
1239 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1240 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1241 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1242 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1243 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1244 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1245 /*
1246 * Make device's PM flags reflect the wake-up capability, but
1247 * let the user space enable it to wake up the system as needed.
1248 */
1249 device_set_wakeup_capable(&dev->dev, true);
1250 device_set_wakeup_enable(&dev->dev, false);
1251 /* Disable the PME# generation functionality */
1252 pci_pme_active(dev, false);
1253 } else {
1254 dev->pme_support = 0;
1255 }
1034} 1256}
1035 1257
1036int 1258int
@@ -1116,13 +1338,11 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
1116 return 0; 1338 return 0;
1117 1339
1118err_out: 1340err_out:
1119 printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%llx@%llx " 1341 dev_warn(&pdev->dev, "BAR %d: can't reserve %s region [%#llx-%#llx]\n",
1120 "for device %s\n", 1342 bar,
1121 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem", 1343 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
1122 bar + 1, /* PCI BAR # */ 1344 (unsigned long long)pci_resource_start(pdev, bar),
1123 (unsigned long long)pci_resource_len(pdev, bar), 1345 (unsigned long long)pci_resource_end(pdev, bar));
1124 (unsigned long long)pci_resource_start(pdev, bar),
1125 pci_name(pdev));
1126 return -EBUSY; 1346 return -EBUSY;
1127} 1347}
1128 1348
@@ -1214,7 +1434,7 @@ pci_set_master(struct pci_dev *dev)
1214 1434
1215 pci_read_config_word(dev, PCI_COMMAND, &cmd); 1435 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1216 if (! (cmd & PCI_COMMAND_MASTER)) { 1436 if (! (cmd & PCI_COMMAND_MASTER)) {
1217 pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev)); 1437 dev_dbg(&dev->dev, "enabling bus mastering\n");
1218 cmd |= PCI_COMMAND_MASTER; 1438 cmd |= PCI_COMMAND_MASTER;
1219 pci_write_config_word(dev, PCI_COMMAND, cmd); 1439 pci_write_config_word(dev, PCI_COMMAND, cmd);
1220 } 1440 }
@@ -1279,8 +1499,8 @@ pci_set_cacheline_size(struct pci_dev *dev)
1279 if (cacheline_size == pci_cache_line_size) 1499 if (cacheline_size == pci_cache_line_size)
1280 return 0; 1500 return 0;
1281 1501
1282 printk(KERN_DEBUG "PCI: cache line size of %d is not supported " 1502 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
1283 "by device %s\n", pci_cache_line_size << 2, pci_name(dev)); 1503 "supported\n", pci_cache_line_size << 2);
1284 1504
1285 return -EINVAL; 1505 return -EINVAL;
1286} 1506}
@@ -1305,8 +1525,7 @@ pci_set_mwi(struct pci_dev *dev)
1305 1525
1306 pci_read_config_word(dev, PCI_COMMAND, &cmd); 1526 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1307 if (! (cmd & PCI_COMMAND_INVALIDATE)) { 1527 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
1308 pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", 1528 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
1309 pci_name(dev));
1310 cmd |= PCI_COMMAND_INVALIDATE; 1529 cmd |= PCI_COMMAND_INVALIDATE;
1311 pci_write_config_word(dev, PCI_COMMAND, cmd); 1530 pci_write_config_word(dev, PCI_COMMAND, cmd);
1312 } 1531 }
@@ -1702,5 +1921,7 @@ EXPORT_SYMBOL(pci_set_power_state);
1702EXPORT_SYMBOL(pci_save_state); 1921EXPORT_SYMBOL(pci_save_state);
1703EXPORT_SYMBOL(pci_restore_state); 1922EXPORT_SYMBOL(pci_restore_state);
1704EXPORT_SYMBOL(pci_enable_wake); 1923EXPORT_SYMBOL(pci_enable_wake);
1924EXPORT_SYMBOL(pci_prepare_to_sleep);
1925EXPORT_SYMBOL(pci_back_from_sleep);
1705EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); 1926EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
1706 1927
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 00408c97e5fc..d807cd786f20 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -5,11 +5,36 @@ extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
5extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev); 5extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
6extern void pci_cleanup_rom(struct pci_dev *dev); 6extern void pci_cleanup_rom(struct pci_dev *dev);
7 7
8/* Firmware callbacks */ 8/**
9extern pci_power_t (*platform_pci_choose_state)(struct pci_dev *dev, 9 * Firmware PM callbacks
10 pm_message_t state); 10 *
11extern int (*platform_pci_set_power_state)(struct pci_dev *dev, 11 * @is_manageable - returns 'true' if given device is power manageable by the
12 pci_power_t state); 12 * platform firmware
13 *
14 * @set_state - invokes the platform firmware to set the device's power state
15 *
16 * @choose_state - returns PCI power state of given device preferred by the
17 * platform; to be used during system-wide transitions from a
18 * sleeping state to the working state and vice versa
19 *
20 * @can_wakeup - returns 'true' if given device is capable of waking up the
21 * system from a sleeping state
22 *
23 * @sleep_wake - enables/disables the system wake up capability of given device
24 *
25 * If given platform is generally capable of power managing PCI devices, all of
26 * these callbacks are mandatory.
27 */
28struct pci_platform_pm_ops {
29 bool (*is_manageable)(struct pci_dev *dev);
30 int (*set_state)(struct pci_dev *dev, pci_power_t state);
31 pci_power_t (*choose_state)(struct pci_dev *dev);
32 bool (*can_wakeup)(struct pci_dev *dev);
33 int (*sleep_wake)(struct pci_dev *dev, bool enable);
34};
35
36extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
37extern void pci_pm_init(struct pci_dev *dev);
13 38
14extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); 39extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
15extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); 40extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
@@ -106,3 +131,16 @@ pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
106} 131}
107 132
108struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev); 133struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
134
135/* PCI slot sysfs helper code */
136#define to_pci_slot(s) container_of(s, struct pci_slot, kobj)
137
138extern struct kset *pci_slots_kset;
139
140struct pci_slot_attribute {
141 struct attribute attr;
142 ssize_t (*show)(struct pci_slot *, char *);
143 ssize_t (*store)(struct pci_slot *, const char *, size_t);
144};
145#define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr)
146
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 07c3bdb6edc2..77036f46acfe 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -26,6 +26,7 @@
26#include <linux/pcieport_if.h> 26#include <linux/pcieport_if.h>
27 27
28#include "aerdrv.h" 28#include "aerdrv.h"
29#include "../../pci.h"
29 30
30/* 31/*
31 * Version Information 32 * Version Information
@@ -219,8 +220,7 @@ static int __devinit aer_probe (struct pcie_device *dev,
219 220
220 /* Alloc rpc data structure */ 221 /* Alloc rpc data structure */
221 if (!(rpc = aer_alloc_rpc(dev))) { 222 if (!(rpc = aer_alloc_rpc(dev))) {
222 printk(KERN_DEBUG "%s: Alloc rpc fails on PCIE device[%s]\n", 223 dev_printk(KERN_DEBUG, device, "alloc rpc failed\n");
223 __func__, device->bus_id);
224 aer_remove(dev); 224 aer_remove(dev);
225 return -ENOMEM; 225 return -ENOMEM;
226 } 226 }
@@ -228,8 +228,7 @@ static int __devinit aer_probe (struct pcie_device *dev,
228 /* Request IRQ ISR */ 228 /* Request IRQ ISR */
229 if ((status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", 229 if ((status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv",
230 dev))) { 230 dev))) {
231 printk(KERN_DEBUG "%s: Request ISR fails on PCIE device[%s]\n", 231 dev_printk(KERN_DEBUG, device, "request IRQ failed\n");
232 __func__, device->bus_id);
233 aer_remove(dev); 232 aer_remove(dev);
234 return status; 233 return status;
235 } 234 }
@@ -273,7 +272,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
273 * to issue Configuration Requests to those devices. 272 * to issue Configuration Requests to those devices.
274 */ 273 */
275 msleep(200); 274 msleep(200);
276 printk(KERN_DEBUG "Complete link reset at Root[%s]\n", dev->dev.bus_id); 275 dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n");
277 276
278 /* Enable Root Port's interrupt in response to error messages */ 277 /* Enable Root Port's interrupt in response to error messages */
279 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status); 278 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index d39a78dbd026..30f581b8791f 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -50,10 +50,10 @@ int aer_osc_setup(struct pcie_device *pciedev)
50 } 50 }
51 51
52 if (ACPI_FAILURE(status)) { 52 if (ACPI_FAILURE(status)) {
53 printk(KERN_DEBUG "AER service couldn't init device %s - %s\n", 53 dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't "
54 pciedev->device.bus_id, 54 "init device: %s\n",
55 (status == AE_SUPPORT || status == AE_NOT_FOUND) ? 55 (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
56 "no _OSC support" : "Run ACPI _OSC fails"); 56 "no _OSC support" : "_OSC failed");
57 return -1; 57 return -1;
58 } 58 }
59 59
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index aaa82392d1dc..ee5e7b5176d0 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -221,9 +221,9 @@ static void report_error_detected(struct pci_dev *dev, void *data)
221 * of a driver for this device is unaware of 221 * of a driver for this device is unaware of
222 * its hw state. 222 * its hw state.
223 */ 223 */
224 printk(KERN_DEBUG "Device ID[%s] has %s\n", 224 dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n",
225 dev->dev.bus_id, (dev->driver) ? 225 dev->driver ?
226 "no AER-aware driver" : "no driver"); 226 "no AER-aware driver" : "no driver");
227 } 227 }
228 return; 228 return;
229 } 229 }
@@ -304,7 +304,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
304{ 304{
305 struct aer_broadcast_data result_data; 305 struct aer_broadcast_data result_data;
306 306
307 printk(KERN_DEBUG "Broadcast %s message\n", error_mesg); 307 dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg);
308 result_data.state = state; 308 result_data.state = state;
309 if (cb == report_error_detected) 309 if (cb == report_error_detected)
310 result_data.result = PCI_ERS_RESULT_CAN_RECOVER; 310 result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
@@ -404,18 +404,16 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
404 data.aer_driver = 404 data.aer_driver =
405 to_service_driver(aerdev->device.driver); 405 to_service_driver(aerdev->device.driver);
406 } else { 406 } else {
407 printk(KERN_DEBUG "No link-reset support to Device ID" 407 dev_printk(KERN_DEBUG, &dev->dev, "no link-reset "
408 "[%s]\n", 408 "support\n");
409 dev->dev.bus_id);
410 return PCI_ERS_RESULT_DISCONNECT; 409 return PCI_ERS_RESULT_DISCONNECT;
411 } 410 }
412 } 411 }
413 412
414 status = data.aer_driver->reset_link(udev); 413 status = data.aer_driver->reset_link(udev);
415 if (status != PCI_ERS_RESULT_RECOVERED) { 414 if (status != PCI_ERS_RESULT_RECOVERED) {
416 printk(KERN_DEBUG "Link reset at upstream Device ID" 415 dev_printk(KERN_DEBUG, &dev->dev, "link reset at upstream "
417 "[%s] failed\n", 416 "device %s failed\n", pci_name(udev));
418 udev->dev.bus_id);
419 return PCI_ERS_RESULT_DISCONNECT; 417 return PCI_ERS_RESULT_DISCONNECT;
420 } 418 }
421 419
@@ -511,10 +509,12 @@ static void handle_error_source(struct pcie_device * aerdev,
511 } else { 509 } else {
512 status = do_recovery(aerdev, dev, info.severity); 510 status = do_recovery(aerdev, dev, info.severity);
513 if (status == PCI_ERS_RESULT_RECOVERED) { 511 if (status == PCI_ERS_RESULT_RECOVERED) {
514 printk(KERN_DEBUG "AER driver successfully recovered\n"); 512 dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
513 "successfully recovered\n");
515 } else { 514 } else {
516 /* TODO: Should kernel panic here? */ 515 /* TODO: Should kernel panic here? */
517 printk(KERN_DEBUG "AER driver didn't recover\n"); 516 dev_printk(KERN_DEBUG, &dev->dev, "AER driver didn't "
517 "recover\n");
518 } 518 }
519 } 519 }
520} 520}
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index 3f0976868eda..359fe5568df1 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -13,6 +13,7 @@
13#include <linux/pm.h> 13#include <linux/pm.h>
14 14
15#include <linux/pcieport_if.h> 15#include <linux/pcieport_if.h>
16#include "portdrv.h"
16 17
17static int pcie_port_bus_match(struct device *dev, struct device_driver *drv); 18static int pcie_port_bus_match(struct device *dev, struct device_driver *drv);
18static int pcie_port_bus_suspend(struct device *dev, pm_message_t state); 19static int pcie_port_bus_suspend(struct device *dev, pm_message_t state);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index fb0abfa508dc..890f0d2b370a 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -23,20 +23,20 @@ static int pcie_port_probe_service(struct device *dev)
23{ 23{
24 struct pcie_device *pciedev; 24 struct pcie_device *pciedev;
25 struct pcie_port_service_driver *driver; 25 struct pcie_port_service_driver *driver;
26 int status = -ENODEV; 26 int status;
27 27
28 if (!dev || !dev->driver) 28 if (!dev || !dev->driver)
29 return status; 29 return -ENODEV;
30 30
31 driver = to_service_driver(dev->driver); 31 driver = to_service_driver(dev->driver);
32 if (!driver || !driver->probe) 32 if (!driver || !driver->probe)
33 return status; 33 return -ENODEV;
34 34
35 pciedev = to_pcie_device(dev); 35 pciedev = to_pcie_device(dev);
36 status = driver->probe(pciedev, driver->id_table); 36 status = driver->probe(pciedev, driver->id_table);
37 if (!status) { 37 if (!status) {
38 printk(KERN_DEBUG "Load service driver %s on pcie device %s\n", 38 dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n",
39 driver->name, dev->bus_id); 39 driver->name);
40 get_device(dev); 40 get_device(dev);
41 } 41 }
42 return status; 42 return status;
@@ -53,8 +53,8 @@ static int pcie_port_remove_service(struct device *dev)
53 pciedev = to_pcie_device(dev); 53 pciedev = to_pcie_device(dev);
54 driver = to_service_driver(dev->driver); 54 driver = to_service_driver(dev->driver);
55 if (driver && driver->remove) { 55 if (driver && driver->remove) {
56 printk(KERN_DEBUG "Unload service driver %s on pcie device %s\n", 56 dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
57 driver->name, dev->bus_id); 57 driver->name);
58 driver->remove(pciedev); 58 driver->remove(pciedev);
59 put_device(dev); 59 put_device(dev);
60 } 60 }
@@ -103,7 +103,7 @@ static int pcie_port_resume_service(struct device *dev)
103 */ 103 */
104static void release_pcie_device(struct device *dev) 104static void release_pcie_device(struct device *dev)
105{ 105{
106 printk(KERN_DEBUG "Free Port Service[%s]\n", dev->bus_id); 106 dev_printk(KERN_DEBUG, dev, "free port service\n");
107 kfree(to_pcie_device(dev)); 107 kfree(to_pcie_device(dev));
108} 108}
109 109
@@ -150,7 +150,7 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
150 if (pos) { 150 if (pos) {
151 struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] = 151 struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] =
152 {{0, 0}, {0, 1}, {0, 2}, {0, 3}}; 152 {{0, 0}, {0, 1}, {0, 2}, {0, 3}};
153 printk("%s Found MSIX capability\n", __func__); 153 dev_info(&dev->dev, "found MSI-X capability\n");
154 status = pci_enable_msix(dev, msix_entries, nvec); 154 status = pci_enable_msix(dev, msix_entries, nvec);
155 if (!status) { 155 if (!status) {
156 int j = 0; 156 int j = 0;
@@ -165,7 +165,7 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
165 if (status) { 165 if (status) {
166 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 166 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
167 if (pos) { 167 if (pos) {
168 printk("%s Found MSI capability\n", __func__); 168 dev_info(&dev->dev, "found MSI capability\n");
169 status = pci_enable_msi(dev); 169 status = pci_enable_msi(dev);
170 if (!status) { 170 if (!status) {
171 interrupt_mode = PCIE_PORT_MSI_MODE; 171 interrupt_mode = PCIE_PORT_MSI_MODE;
@@ -252,7 +252,7 @@ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
252 return NULL; 252 return NULL;
253 253
254 pcie_device_init(parent, device, port_type, service_type, irq,irq_mode); 254 pcie_device_init(parent, device, port_type, service_type, irq,irq_mode);
255 printk(KERN_DEBUG "Allocate Port Service[%s]\n", device->device.bus_id); 255 dev_printk(KERN_DEBUG, &device->device, "allocate port service\n");
256 return device; 256 return device;
257} 257}
258 258
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 51d163238d93..367c9c20000d 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -91,9 +91,8 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
91 91
92 pci_set_master(dev); 92 pci_set_master(dev);
93 if (!dev->irq && dev->pin) { 93 if (!dev->irq && dev->pin) {
94 printk(KERN_WARNING 94 dev_warn(&dev->dev, "device [%04x/%04x] has invalid IRQ; "
95 "%s->Dev[%04x:%04x] has invalid IRQ. Check vendor BIOS\n", 95 "check vendor BIOS\n", dev->vendor, dev->device);
96 __func__, dev->vendor, dev->device);
97 } 96 }
98 if (pcie_port_device_register(dev)) { 97 if (pcie_port_device_register(dev)) {
99 pci_disable_device(dev); 98 pci_disable_device(dev);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 3706ce7972dd..b1724cf31b66 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -277,8 +277,8 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
277 res->end = res->start + sz64; 277 res->end = res->start + sz64;
278#else 278#else
279 if (sz64 > 0x100000000ULL) { 279 if (sz64 > 0x100000000ULL) {
280 printk(KERN_ERR "PCI: Unable to handle 64-bit " 280 dev_err(&dev->dev, "BAR %d: can't handle 64-bit"
281 "BAR for device %s\n", pci_name(dev)); 281 " BAR\n", pos);
282 res->start = 0; 282 res->start = 0;
283 res->flags = 0; 283 res->flags = 0;
284 } else if (lhi) { 284 } else if (lhi) {
@@ -329,7 +329,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
329 return; 329 return;
330 330
331 if (dev->transparent) { 331 if (dev->transparent) {
332 printk(KERN_INFO "PCI: Transparent bridge - %s\n", pci_name(dev)); 332 dev_info(&dev->dev, "transparent bridge\n");
333 for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++) 333 for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
334 child->resource[i] = child->parent->resource[i - 3]; 334 child->resource[i] = child->parent->resource[i - 3];
335 } 335 }
@@ -392,7 +392,8 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
392 limit |= ((long) mem_limit_hi) << 32; 392 limit |= ((long) mem_limit_hi) << 32;
393#else 393#else
394 if (mem_base_hi || mem_limit_hi) { 394 if (mem_base_hi || mem_limit_hi) {
395 printk(KERN_ERR "PCI: Unable to handle 64-bit address space for bridge %s\n", pci_name(dev)); 395 dev_err(&dev->dev, "can't handle 64-bit "
396 "address space for bridge\n");
396 return; 397 return;
397 } 398 }
398#endif 399#endif
@@ -414,6 +415,7 @@ static struct pci_bus * pci_alloc_bus(void)
414 INIT_LIST_HEAD(&b->node); 415 INIT_LIST_HEAD(&b->node);
415 INIT_LIST_HEAD(&b->children); 416 INIT_LIST_HEAD(&b->children);
416 INIT_LIST_HEAD(&b->devices); 417 INIT_LIST_HEAD(&b->devices);
418 INIT_LIST_HEAD(&b->slots);
417 } 419 }
418 return b; 420 return b;
419} 421}
@@ -511,8 +513,8 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
511 513
512 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); 514 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
513 515
514 pr_debug("PCI: Scanning behind PCI bridge %s, config %06x, pass %d\n", 516 dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n",
515 pci_name(dev), buses & 0xffffff, pass); 517 buses & 0xffffff, pass);
516 518
517 /* Disable MasterAbortMode during probing to avoid reporting 519 /* Disable MasterAbortMode during probing to avoid reporting
518 of bus errors (in some architectures) */ 520 of bus errors (in some architectures) */
@@ -535,8 +537,8 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
535 * ignore it. This can happen with the i450NX chipset. 537 * ignore it. This can happen with the i450NX chipset.
536 */ 538 */
537 if (pci_find_bus(pci_domain_nr(bus), busnr)) { 539 if (pci_find_bus(pci_domain_nr(bus), busnr)) {
538 printk(KERN_INFO "PCI: Bus %04x:%02x already known\n", 540 dev_info(&dev->dev, "bus %04x:%02x already known\n",
539 pci_domain_nr(bus), busnr); 541 pci_domain_nr(bus), busnr);
540 goto out; 542 goto out;
541 } 543 }
542 544
@@ -711,8 +713,9 @@ static int pci_setup_device(struct pci_dev * dev)
711{ 713{
712 u32 class; 714 u32 class;
713 715
714 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), 716 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
715 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); 717 dev->bus->number, PCI_SLOT(dev->devfn),
718 PCI_FUNC(dev->devfn));
716 719
717 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class); 720 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
718 dev->revision = class & 0xff; 721 dev->revision = class & 0xff;
@@ -720,7 +723,7 @@ static int pci_setup_device(struct pci_dev * dev)
720 dev->class = class; 723 dev->class = class;
721 class >>= 8; 724 class >>= 8;
722 725
723 pr_debug("PCI: Found %s [%04x/%04x] %06x %02x\n", pci_name(dev), 726 dev_dbg(&dev->dev, "found [%04x/%04x] class %06x header type %02x\n",
724 dev->vendor, dev->device, class, dev->hdr_type); 727 dev->vendor, dev->device, class, dev->hdr_type);
725 728
726 /* "Unknown power state" */ 729 /* "Unknown power state" */
@@ -788,13 +791,13 @@ static int pci_setup_device(struct pci_dev * dev)
788 break; 791 break;
789 792
790 default: /* unknown header */ 793 default: /* unknown header */
791 printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n", 794 dev_err(&dev->dev, "unknown header type %02x, "
792 pci_name(dev), dev->hdr_type); 795 "ignoring device\n", dev->hdr_type);
793 return -1; 796 return -1;
794 797
795 bad: 798 bad:
796 printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n", 799 dev_err(&dev->dev, "ignoring class %02x (doesn't match header "
797 pci_name(dev), class, dev->hdr_type); 800 "type %02x)\n", class, dev->hdr_type);
798 dev->class = PCI_CLASS_NOT_DEFINED; 801 dev->class = PCI_CLASS_NOT_DEFINED;
799 } 802 }
800 803
@@ -927,7 +930,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
927 return NULL; 930 return NULL;
928 /* Card hasn't responded in 60 seconds? Must be stuck. */ 931 /* Card hasn't responded in 60 seconds? Must be stuck. */
929 if (delay > 60 * 1000) { 932 if (delay > 60 * 1000) {
930 printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " 933 printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
931 "responding\n", pci_domain_nr(bus), 934 "responding\n", pci_domain_nr(bus),
932 bus->number, PCI_SLOT(devfn), 935 bus->number, PCI_SLOT(devfn),
933 PCI_FUNC(devfn)); 936 PCI_FUNC(devfn));
@@ -984,6 +987,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
984 /* Fix up broken headers */ 987 /* Fix up broken headers */
985 pci_fixup_device(pci_fixup_header, dev); 988 pci_fixup_device(pci_fixup_header, dev);
986 989
990 /* Initialize power management of the device */
991 pci_pm_init(dev);
992
987 /* 993 /*
988 * Add the device to our list of discovered devices 994 * Add the device to our list of discovered devices
989 * and the bus list for fixup functions, etc. 995 * and the bus list for fixup functions, etc.
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 963a97642ae9..4400dffbd93a 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: proc.c,v 1.13 1998/05/12 07:36:07 mj Exp $
3 *
4 * Procfs interface for the PCI bus. 2 * Procfs interface for the PCI bus.
5 * 3 *
6 * Copyright (c) 1997--1999 Martin Mares <mj@ucw.cz> 4 * Copyright (c) 1997--1999 Martin Mares <mj@ucw.cz>
@@ -482,5 +480,5 @@ static int __init pci_proc_init(void)
482 return 0; 480 return 0;
483} 481}
484 482
485__initcall(pci_proc_init); 483device_initcall(pci_proc_init);
486 484
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 338a3f94b4d4..12d489395fad 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -556,7 +556,7 @@ static void quirk_via_ioapic(struct pci_dev *dev)
556 pci_write_config_byte (dev, 0x58, tmp); 556 pci_write_config_byte (dev, 0x58, tmp);
557} 557}
558DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); 558DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
559DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); 559DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
560 560
561/* 561/*
562 * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit. 562 * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit.
@@ -576,7 +576,7 @@ static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
576 } 576 }
577} 577}
578DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); 578DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
579DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); 579DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
580 580
581/* 581/*
582 * The AMD io apic can hang the box when an apic irq is masked. 582 * The AMD io apic can hang the box when an apic irq is masked.
@@ -622,7 +622,7 @@ static void quirk_amd_8131_ioapic(struct pci_dev *dev)
622 } 622 }
623} 623}
624DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); 624DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
625DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); 625DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
626#endif /* CONFIG_X86_IO_APIC */ 626#endif /* CONFIG_X86_IO_APIC */
627 627
628/* 628/*
@@ -774,7 +774,7 @@ static void quirk_cardbus_legacy(struct pci_dev *dev)
774 pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0); 774 pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
775} 775}
776DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); 776DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy);
777DECLARE_PCI_FIXUP_RESUME(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); 777DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy);
778 778
779/* 779/*
780 * Following the PCI ordering rules is optional on the AMD762. I'm not 780 * Following the PCI ordering rules is optional on the AMD762. I'm not
@@ -797,7 +797,7 @@ static void quirk_amd_ordering(struct pci_dev *dev)
797 } 797 }
798} 798}
799DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); 799DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
800DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); 800DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
801 801
802/* 802/*
803 * DreamWorks provided workaround for Dunord I-3000 problem 803 * DreamWorks provided workaround for Dunord I-3000 problem
@@ -865,7 +865,7 @@ static void quirk_disable_pxb(struct pci_dev *pdev)
865 } 865 }
866} 866}
867DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); 867DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
868DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); 868DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
869 869
870static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) 870static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
871{ 871{
@@ -885,9 +885,9 @@ static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
885 } 885 }
886} 886}
887DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); 887DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
888DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); 888DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
889DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); 889DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
890DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); 890DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
891 891
892/* 892/*
893 * Serverworks CSB5 IDE does not fully support native mode 893 * Serverworks CSB5 IDE does not fully support native mode
@@ -1054,6 +1054,20 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1054 * its on-board VGA controller */ 1054 * its on-board VGA controller */
1055 asus_hides_smbus = 1; 1055 asus_hides_smbus = 1;
1056 } 1056 }
1057 else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_IG)
1058 switch(dev->subsystem_device) {
1059 case 0x00b8: /* Compaq Evo D510 CMT */
1060 case 0x00b9: /* Compaq Evo D510 SFF */
1061 asus_hides_smbus = 1;
1062 }
1063 else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
1064 switch (dev->subsystem_device) {
1065 case 0x001A: /* Compaq Deskpro EN SSF P667 815E */
1066 /* Motherboard doesn't have host bridge
1067 * subvendor/subdevice IDs, therefore checking
1068 * its on-board VGA controller */
1069 asus_hides_smbus = 1;
1070 }
1057 } 1071 }
1058} 1072}
1059DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge); 1073DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge);
@@ -1068,6 +1082,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, as
1068DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge); 1082DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
1069 1083
1070DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge); 1084DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
1085DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_IG, asus_hides_smbus_hostbridge);
1086DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
1071 1087
1072static void asus_hides_smbus_lpc(struct pci_dev *dev) 1088static void asus_hides_smbus_lpc(struct pci_dev *dev)
1073{ 1089{
@@ -1093,31 +1109,61 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asu
1093DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); 1109DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
1094DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); 1110DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
1095DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); 1111DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1096DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc); 1112DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
1097DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc); 1113DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
1098DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc); 1114DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
1099DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc); 1115DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
1100DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); 1116DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
1101DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); 1117DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
1102DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); 1118DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1103 1119
1104static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev) 1120/* It appears we just have one such device. If not, we have a warning */
1121static void __iomem *asus_rcba_base;
1122static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
1105{ 1123{
1106 u32 val, rcba; 1124 u32 rcba;
1107 void __iomem *base;
1108 1125
1109 if (likely(!asus_hides_smbus)) 1126 if (likely(!asus_hides_smbus))
1110 return; 1127 return;
1128 WARN_ON(asus_rcba_base);
1129
1111 pci_read_config_dword(dev, 0xF0, &rcba); 1130 pci_read_config_dword(dev, 0xF0, &rcba);
1112 base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000); /* use bits 31:14, 16 kB aligned */ 1131 /* use bits 31:14, 16 kB aligned */
1113 if (base == NULL) return; 1132 asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
1114 val=readl(base + 0x3418); /* read the Function Disable register, dword mode only */ 1133 if (asus_rcba_base == NULL)
1115 writel(val & 0xFFFFFFF7, base + 0x3418); /* enable the SMBus device */ 1134 return;
1116 iounmap(base); 1135}
1136
1137static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
1138{
1139 u32 val;
1140
1141 if (likely(!asus_hides_smbus || !asus_rcba_base))
1142 return;
1143 /* read the Function Disable register, dword mode only */
1144 val = readl(asus_rcba_base + 0x3418);
1145 writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */
1146}
1147
1148static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
1149{
1150 if (likely(!asus_hides_smbus || !asus_rcba_base))
1151 return;
1152 iounmap(asus_rcba_base);
1153 asus_rcba_base = NULL;
1117 dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n"); 1154 dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n");
1118} 1155}
1156
1157static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
1158{
1159 asus_hides_smbus_lpc_ich6_suspend(dev);
1160 asus_hides_smbus_lpc_ich6_resume_early(dev);
1161 asus_hides_smbus_lpc_ich6_resume(dev);
1162}
1119DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6); 1163DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
1120DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6); 1164DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend);
1165DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
1166DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
1121 1167
1122/* 1168/*
1123 * SiS 96x south bridge: BIOS typically hides SMBus device... 1169 * SiS 96x south bridge: BIOS typically hides SMBus device...
@@ -1135,10 +1181,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_
1135DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); 1181DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1136DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); 1182DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1137DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); 1183DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1138DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus); 1184DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
1139DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); 1185DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
1140DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); 1186DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
1141DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); 1187DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1142 1188
1143/* 1189/*
1144 * ... This is further complicated by the fact that some SiS96x south 1190 * ... This is further complicated by the fact that some SiS96x south
@@ -1172,7 +1218,7 @@ static void quirk_sis_503(struct pci_dev *dev)
1172 quirk_sis_96x_smbus(dev); 1218 quirk_sis_96x_smbus(dev);
1173} 1219}
1174DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); 1220DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1175DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); 1221DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1176 1222
1177 1223
1178/* 1224/*
@@ -1205,7 +1251,7 @@ static void asus_hides_ac97_lpc(struct pci_dev *dev)
1205 } 1251 }
1206} 1252}
1207DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); 1253DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1208DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); 1254DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1209 1255
1210#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE) 1256#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
1211 1257
@@ -1270,12 +1316,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, qui
1270DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); 1316DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
1271DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); 1317DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
1272DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); 1318DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
1273DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); 1319DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
1274DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); 1320DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
1275DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); 1321DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
1276DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); 1322DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
1277DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); 1323DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
1278DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); 1324DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
1279 1325
1280#endif 1326#endif
1281 1327
@@ -1521,6 +1567,10 @@ extern struct pci_fixup __start_pci_fixups_enable[];
1521extern struct pci_fixup __end_pci_fixups_enable[]; 1567extern struct pci_fixup __end_pci_fixups_enable[];
1522extern struct pci_fixup __start_pci_fixups_resume[]; 1568extern struct pci_fixup __start_pci_fixups_resume[];
1523extern struct pci_fixup __end_pci_fixups_resume[]; 1569extern struct pci_fixup __end_pci_fixups_resume[];
1570extern struct pci_fixup __start_pci_fixups_resume_early[];
1571extern struct pci_fixup __end_pci_fixups_resume_early[];
1572extern struct pci_fixup __start_pci_fixups_suspend[];
1573extern struct pci_fixup __end_pci_fixups_suspend[];
1524 1574
1525 1575
1526void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) 1576void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
@@ -1553,6 +1603,16 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
1553 end = __end_pci_fixups_resume; 1603 end = __end_pci_fixups_resume;
1554 break; 1604 break;
1555 1605
1606 case pci_fixup_resume_early:
1607 start = __start_pci_fixups_resume_early;
1608 end = __end_pci_fixups_resume_early;
1609 break;
1610
1611 case pci_fixup_suspend:
1612 start = __start_pci_fixups_suspend;
1613 end = __end_pci_fixups_suspend;
1614 break;
1615
1556 default: 1616 default:
1557 /* stupid compiler warning, you would think with an enum... */ 1617 /* stupid compiler warning, you would think with an enum... */
1558 return; 1618 return;
@@ -1629,7 +1689,7 @@ static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
1629} 1689}
1630DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, 1690DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
1631 quirk_nvidia_ck804_pcie_aer_ext_cap); 1691 quirk_nvidia_ck804_pcie_aer_ext_cap);
1632DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, 1692DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
1633 quirk_nvidia_ck804_pcie_aer_ext_cap); 1693 quirk_nvidia_ck804_pcie_aer_ext_cap);
1634 1694
1635static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) 1695static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 8ddb918f5f57..827c0a520e2b 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -27,13 +27,6 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29 29
30#define DEBUG_CONFIG 1
31#if DEBUG_CONFIG
32#define DBG(x...) printk(x)
33#else
34#define DBG(x...)
35#endif
36
37static void pbus_assign_resources_sorted(struct pci_bus *bus) 30static void pbus_assign_resources_sorted(struct pci_bus *bus)
38{ 31{
39 struct pci_dev *dev; 32 struct pci_dev *dev;
@@ -81,8 +74,8 @@ void pci_setup_cardbus(struct pci_bus *bus)
81 struct pci_dev *bridge = bus->self; 74 struct pci_dev *bridge = bus->self;
82 struct pci_bus_region region; 75 struct pci_bus_region region;
83 76
84 printk("PCI: Bus %d, cardbus bridge: %s\n", 77 dev_info(&bridge->dev, "CardBus bridge, secondary bus %04x:%02x\n",
85 bus->number, pci_name(bridge)); 78 pci_domain_nr(bus), bus->number);
86 79
87 pcibios_resource_to_bus(bridge, &region, bus->resource[0]); 80 pcibios_resource_to_bus(bridge, &region, bus->resource[0]);
88 if (bus->resource[0]->flags & IORESOURCE_IO) { 81 if (bus->resource[0]->flags & IORESOURCE_IO) {
@@ -90,7 +83,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
90 * The IO resource is allocated a range twice as large as it 83 * The IO resource is allocated a range twice as large as it
91 * would normally need. This allows us to set both IO regs. 84 * would normally need. This allows us to set both IO regs.
92 */ 85 */
93 printk(KERN_INFO " IO window: 0x%08lx-0x%08lx\n", 86 dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n",
94 (unsigned long)region.start, 87 (unsigned long)region.start,
95 (unsigned long)region.end); 88 (unsigned long)region.end);
96 pci_write_config_dword(bridge, PCI_CB_IO_BASE_0, 89 pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
@@ -101,7 +94,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
101 94
102 pcibios_resource_to_bus(bridge, &region, bus->resource[1]); 95 pcibios_resource_to_bus(bridge, &region, bus->resource[1]);
103 if (bus->resource[1]->flags & IORESOURCE_IO) { 96 if (bus->resource[1]->flags & IORESOURCE_IO) {
104 printk(KERN_INFO " IO window: 0x%08lx-0x%08lx\n", 97 dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n",
105 (unsigned long)region.start, 98 (unsigned long)region.start,
106 (unsigned long)region.end); 99 (unsigned long)region.end);
107 pci_write_config_dword(bridge, PCI_CB_IO_BASE_1, 100 pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
@@ -112,7 +105,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
112 105
113 pcibios_resource_to_bus(bridge, &region, bus->resource[2]); 106 pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
114 if (bus->resource[2]->flags & IORESOURCE_MEM) { 107 if (bus->resource[2]->flags & IORESOURCE_MEM) {
115 printk(KERN_INFO " PREFETCH window: 0x%08lx-0x%08lx\n", 108 dev_info(&bridge->dev, " PREFETCH window: %#08lx-%#08lx\n",
116 (unsigned long)region.start, 109 (unsigned long)region.start,
117 (unsigned long)region.end); 110 (unsigned long)region.end);
118 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0, 111 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
@@ -123,7 +116,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
123 116
124 pcibios_resource_to_bus(bridge, &region, bus->resource[3]); 117 pcibios_resource_to_bus(bridge, &region, bus->resource[3]);
125 if (bus->resource[3]->flags & IORESOURCE_MEM) { 118 if (bus->resource[3]->flags & IORESOURCE_MEM) {
126 printk(KERN_INFO " MEM window: 0x%08lx-0x%08lx\n", 119 dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n",
127 (unsigned long)region.start, 120 (unsigned long)region.start,
128 (unsigned long)region.end); 121 (unsigned long)region.end);
129 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1, 122 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
@@ -151,7 +144,8 @@ static void pci_setup_bridge(struct pci_bus *bus)
151 struct pci_bus_region region; 144 struct pci_bus_region region;
152 u32 l, bu, lu, io_upper16; 145 u32 l, bu, lu, io_upper16;
153 146
154 DBG(KERN_INFO "PCI: Bridge: %s\n", pci_name(bridge)); 147 dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n",
148 pci_domain_nr(bus), bus->number);
155 149
156 /* Set up the top and bottom of the PCI I/O segment for this bus. */ 150 /* Set up the top and bottom of the PCI I/O segment for this bus. */
157 pcibios_resource_to_bus(bridge, &region, bus->resource[0]); 151 pcibios_resource_to_bus(bridge, &region, bus->resource[0]);
@@ -162,7 +156,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
162 l |= region.end & 0xf000; 156 l |= region.end & 0xf000;
163 /* Set up upper 16 bits of I/O base/limit. */ 157 /* Set up upper 16 bits of I/O base/limit. */
164 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); 158 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
165 DBG(KERN_INFO " IO window: %04lx-%04lx\n", 159 dev_info(&bridge->dev, " IO window: %#04lx-%#04lx\n",
166 (unsigned long)region.start, 160 (unsigned long)region.start,
167 (unsigned long)region.end); 161 (unsigned long)region.end);
168 } 162 }
@@ -170,7 +164,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
170 /* Clear upper 16 bits of I/O base/limit. */ 164 /* Clear upper 16 bits of I/O base/limit. */
171 io_upper16 = 0; 165 io_upper16 = 0;
172 l = 0x00f0; 166 l = 0x00f0;
173 DBG(KERN_INFO " IO window: disabled.\n"); 167 dev_info(&bridge->dev, " IO window: disabled\n");
174 } 168 }
175 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ 169 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
176 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); 170 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
@@ -185,13 +179,13 @@ static void pci_setup_bridge(struct pci_bus *bus)
185 if (bus->resource[1]->flags & IORESOURCE_MEM) { 179 if (bus->resource[1]->flags & IORESOURCE_MEM) {
186 l = (region.start >> 16) & 0xfff0; 180 l = (region.start >> 16) & 0xfff0;
187 l |= region.end & 0xfff00000; 181 l |= region.end & 0xfff00000;
188 DBG(KERN_INFO " MEM window: 0x%08lx-0x%08lx\n", 182 dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n",
189 (unsigned long)region.start, 183 (unsigned long)region.start,
190 (unsigned long)region.end); 184 (unsigned long)region.end);
191 } 185 }
192 else { 186 else {
193 l = 0x0000fff0; 187 l = 0x0000fff0;
194 DBG(KERN_INFO " MEM window: disabled.\n"); 188 dev_info(&bridge->dev, " MEM window: disabled\n");
195 } 189 }
196 pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); 190 pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
197 191
@@ -208,13 +202,13 @@ static void pci_setup_bridge(struct pci_bus *bus)
208 l |= region.end & 0xfff00000; 202 l |= region.end & 0xfff00000;
209 bu = upper_32_bits(region.start); 203 bu = upper_32_bits(region.start);
210 lu = upper_32_bits(region.end); 204 lu = upper_32_bits(region.end);
211 DBG(KERN_INFO " PREFETCH window: 0x%016llx-0x%016llx\n", 205 dev_info(&bridge->dev, " PREFETCH window: %#016llx-%#016llx\n",
212 (unsigned long long)region.start, 206 (unsigned long long)region.start,
213 (unsigned long long)region.end); 207 (unsigned long long)region.end);
214 } 208 }
215 else { 209 else {
216 l = 0x0000fff0; 210 l = 0x0000fff0;
217 DBG(KERN_INFO " PREFETCH window: disabled.\n"); 211 dev_info(&bridge->dev, " PREFETCH window: disabled\n");
218 } 212 }
219 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); 213 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
220 214
@@ -361,9 +355,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
361 align = (i < PCI_BRIDGE_RESOURCES) ? r_size : r->start; 355 align = (i < PCI_BRIDGE_RESOURCES) ? r_size : r->start;
362 order = __ffs(align) - 20; 356 order = __ffs(align) - 20;
363 if (order > 11) { 357 if (order > 11) {
364 printk(KERN_WARNING "PCI: region %s/%d " 358 dev_warn(&dev->dev, "BAR %d too large: "
365 "too large: 0x%016llx-0x%016llx\n", 359 "%#016llx-%#016llx\n", i,
366 pci_name(dev), i,
367 (unsigned long long)r->start, 360 (unsigned long long)r->start,
368 (unsigned long long)r->end); 361 (unsigned long long)r->end);
369 r->flags = 0; 362 r->flags = 0;
@@ -529,8 +522,8 @@ void __ref pci_bus_assign_resources(struct pci_bus *bus)
529 break; 522 break;
530 523
531 default: 524 default:
532 printk(KERN_INFO "PCI: not setting up bridge %s " 525 dev_info(&dev->dev, "not setting up bridge for bus "
533 "for bus %d\n", pci_name(dev), b->number); 526 "%04x:%02x\n", pci_domain_nr(b), b->number);
534 break; 527 break;
535 } 528 }
536 } 529 }
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index 05ca2ed9eb51..aa795fd428de 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -47,8 +47,7 @@ pdev_fixup_irq(struct pci_dev *dev,
47 } 47 }
48 dev->irq = irq; 48 dev->irq = irq;
49 49
50 pr_debug("PCI: fixup irq: (%s) got %d\n", 50 dev_dbg(&dev->dev, "fixup irq: got %d\n", dev->irq);
51 kobject_name(&dev->dev.kobj), dev->irq);
52 51
53 /* Always tell the device, so the driver knows what is 52 /* Always tell the device, so the driver knows what is
54 the real IRQ to use; the device does not use it. */ 53 the real IRQ to use; the device does not use it. */
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 7d35cdf4579f..1a5fc83c71b3 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -26,8 +26,7 @@
26#include "pci.h" 26#include "pci.h"
27 27
28 28
29void 29void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
30pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
31{ 30{
32 struct pci_bus_region region; 31 struct pci_bus_region region;
33 u32 new, check, mask; 32 u32 new, check, mask;
@@ -43,20 +42,20 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
43 /* 42 /*
44 * Ignore non-moveable resources. This might be legacy resources for 43 * Ignore non-moveable resources. This might be legacy resources for
45 * which no functional BAR register exists or another important 44 * which no functional BAR register exists or another important
46 * system resource we should better not move around in system address 45 * system resource we shouldn't move around.
47 * space.
48 */ 46 */
49 if (res->flags & IORESOURCE_PCI_FIXED) 47 if (res->flags & IORESOURCE_PCI_FIXED)
50 return; 48 return;
51 49
52 pcibios_resource_to_bus(dev, &region, res); 50 pcibios_resource_to_bus(dev, &region, res);
53 51
54 pr_debug(" got res [%llx:%llx] bus [%llx:%llx] flags %lx for " 52 dev_dbg(&dev->dev, "BAR %d: got res [%#llx-%#llx] bus [%#llx-%#llx] "
55 "BAR %d of %s\n", (unsigned long long)res->start, 53 "flags %#lx\n", resno,
54 (unsigned long long)res->start,
56 (unsigned long long)res->end, 55 (unsigned long long)res->end,
57 (unsigned long long)region.start, 56 (unsigned long long)region.start,
58 (unsigned long long)region.end, 57 (unsigned long long)region.end,
59 (unsigned long)res->flags, resno, pci_name(dev)); 58 (unsigned long)res->flags);
60 59
61 new = region.start | (res->flags & PCI_REGION_FLAG_MASK); 60 new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
62 if (res->flags & IORESOURCE_IO) 61 if (res->flags & IORESOURCE_IO)
@@ -81,9 +80,8 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
81 pci_read_config_dword(dev, reg, &check); 80 pci_read_config_dword(dev, reg, &check);
82 81
83 if ((new ^ check) & mask) { 82 if ((new ^ check) & mask) {
84 printk(KERN_ERR "PCI: Error while updating region " 83 dev_err(&dev->dev, "BAR %d: error updating (%#08x != %#08x)\n",
85 "%s/%d (%08x != %08x)\n", pci_name(dev), resno, 84 resno, new, check);
86 new, check);
87 } 85 }
88 86
89 if ((new & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == 87 if ((new & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
@@ -92,15 +90,14 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
92 pci_write_config_dword(dev, reg + 4, new); 90 pci_write_config_dword(dev, reg + 4, new);
93 pci_read_config_dword(dev, reg + 4, &check); 91 pci_read_config_dword(dev, reg + 4, &check);
94 if (check != new) { 92 if (check != new) {
95 printk(KERN_ERR "PCI: Error updating region " 93 dev_err(&dev->dev, "BAR %d: error updating "
96 "%s/%d (high %08x != %08x)\n", 94 "(high %#08x != %#08x)\n", resno, new, check);
97 pci_name(dev), resno, new, check);
98 } 95 }
99 } 96 }
100 res->flags &= ~IORESOURCE_UNSET; 97 res->flags &= ~IORESOURCE_UNSET;
101 pr_debug("PCI: moved device %s resource %d (%lx) to %x\n", 98 dev_dbg(&dev->dev, "BAR %d: moved to bus [%#llx-%#llx] flags %#lx\n",
102 pci_name(dev), resno, res->flags, 99 resno, (unsigned long long)region.start,
103 new & ~PCI_REGION_FLAG_MASK); 100 (unsigned long long)region.end, res->flags);
104} 101}
105 102
106int pci_claim_resource(struct pci_dev *dev, int resource) 103int pci_claim_resource(struct pci_dev *dev, int resource)
@@ -117,10 +114,11 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
117 err = insert_resource(root, res); 114 err = insert_resource(root, res);
118 115
119 if (err) { 116 if (err) {
120 printk(KERN_ERR "PCI: %s region %d of %s %s [%llx:%llx]\n", 117 dev_err(&dev->dev, "BAR %d: %s of %s [%#llx-%#llx]\n",
121 root ? "Address space collision on" : 118 resource,
122 "No parent found for", 119 root ? "address space collision on" :
123 resource, dtype, pci_name(dev), 120 "no parent found for",
121 dtype,
124 (unsigned long long)res->start, 122 (unsigned long long)res->start,
125 (unsigned long long)res->end); 123 (unsigned long long)res->end);
126 } 124 }
@@ -140,11 +138,10 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
140 138
141 align = resource_alignment(res); 139 align = resource_alignment(res);
142 if (!align) { 140 if (!align) {
143 printk(KERN_ERR "PCI: Cannot allocate resource (bogus " 141 dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus "
144 "alignment) %d [%llx:%llx] (flags %lx) of %s\n", 142 "alignment) [%#llx-%#llx] flags %#lx\n",
145 resno, (unsigned long long)res->start, 143 resno, (unsigned long long)res->start,
146 (unsigned long long)res->end, res->flags, 144 (unsigned long long)res->end, res->flags);
147 pci_name(dev));
148 return -EINVAL; 145 return -EINVAL;
149 } 146 }
150 147
@@ -165,11 +162,11 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
165 } 162 }
166 163
167 if (ret) { 164 if (ret) {
168 printk(KERN_ERR "PCI: Failed to allocate %s resource " 165 dev_err(&dev->dev, "BAR %d: can't allocate %s resource "
169 "#%d:%llx@%llx for %s\n", 166 "[%#llx-%#llx]\n", resno,
170 res->flags & IORESOURCE_IO ? "I/O" : "mem", 167 res->flags & IORESOURCE_IO ? "I/O" : "mem",
171 resno, (unsigned long long)size, 168 (unsigned long long)res->start,
172 (unsigned long long)res->start, pci_name(dev)); 169 (unsigned long long)res->end);
173 } else { 170 } else {
174 res->flags &= ~IORESOURCE_STARTALIGN; 171 res->flags &= ~IORESOURCE_STARTALIGN;
175 if (resno < PCI_BRIDGE_RESOURCES) 172 if (resno < PCI_BRIDGE_RESOURCES)
@@ -205,11 +202,11 @@ int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
205 } 202 }
206 203
207 if (ret) { 204 if (ret) {
208 printk(KERN_ERR "PCI: Failed to allocate %s resource " 205 dev_err(&dev->dev, "BAR %d: can't allocate %s resource "
209 "#%d:%llx@%llx for %s\n", 206 "[%#llx-%#llx\n]", resno,
210 res->flags & IORESOURCE_IO ? "I/O" : "mem", 207 res->flags & IORESOURCE_IO ? "I/O" : "mem",
211 resno, (unsigned long long)(res->end - res->start + 1), 208 (unsigned long long)res->start,
212 (unsigned long long)res->start, pci_name(dev)); 209 (unsigned long long)res->end);
213 } else if (resno < PCI_BRIDGE_RESOURCES) { 210 } else if (resno < PCI_BRIDGE_RESOURCES) {
214 pci_update_resource(dev, res, resno); 211 pci_update_resource(dev, res, resno);
215 } 212 }
@@ -239,11 +236,10 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
239 236
240 r_align = resource_alignment(r); 237 r_align = resource_alignment(r);
241 if (!r_align) { 238 if (!r_align) {
242 printk(KERN_WARNING "PCI: bogus alignment of resource " 239 dev_warn(&dev->dev, "BAR %d: bogus alignment "
243 "%d [%llx:%llx] (flags %lx) of %s\n", 240 "[%#llx-%#llx] flags %#lx\n",
244 i, (unsigned long long)r->start, 241 i, (unsigned long long)r->start,
245 (unsigned long long)r->end, r->flags, 242 (unsigned long long)r->end, r->flags);
246 pci_name(dev));
247 continue; 243 continue;
248 } 244 }
249 for (list = head; ; list = list->next) { 245 for (list = head; ; list = list->next) {
@@ -291,7 +287,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
291 287
292 if (!r->parent) { 288 if (!r->parent) {
293 dev_err(&dev->dev, "device not available because of " 289 dev_err(&dev->dev, "device not available because of "
294 "BAR %d [%llx:%llx] collisions\n", i, 290 "BAR %d [%#llx-%#llx] collisions\n", i,
295 (unsigned long long) r->start, 291 (unsigned long long) r->start,
296 (unsigned long long) r->end); 292 (unsigned long long) r->end);
297 return -EINVAL; 293 return -EINVAL;
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
new file mode 100644
index 000000000000..7e5b85cbd948
--- /dev/null
+++ b/drivers/pci/slot.c
@@ -0,0 +1,233 @@
1/*
2 * drivers/pci/slot.c
3 * Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx>
4 * Copyright (C) 2006-2008 Hewlett-Packard Development Company, L.P.
5 * Alex Chiang <achiang@hp.com>
6 */
7
8#include <linux/kobject.h>
9#include <linux/pci.h>
10#include <linux/err.h>
11#include "pci.h"
12
13struct kset *pci_slots_kset;
14EXPORT_SYMBOL_GPL(pci_slots_kset);
15
16static ssize_t pci_slot_attr_show(struct kobject *kobj,
17 struct attribute *attr, char *buf)
18{
19 struct pci_slot *slot = to_pci_slot(kobj);
20 struct pci_slot_attribute *attribute = to_pci_slot_attr(attr);
21 return attribute->show ? attribute->show(slot, buf) : -EIO;
22}
23
24static ssize_t pci_slot_attr_store(struct kobject *kobj,
25 struct attribute *attr, const char *buf, size_t len)
26{
27 struct pci_slot *slot = to_pci_slot(kobj);
28 struct pci_slot_attribute *attribute = to_pci_slot_attr(attr);
29 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
30}
31
32static struct sysfs_ops pci_slot_sysfs_ops = {
33 .show = pci_slot_attr_show,
34 .store = pci_slot_attr_store,
35};
36
37static ssize_t address_read_file(struct pci_slot *slot, char *buf)
38{
39 if (slot->number == 0xff)
40 return sprintf(buf, "%04x:%02x\n",
41 pci_domain_nr(slot->bus),
42 slot->bus->number);
43 else
44 return sprintf(buf, "%04x:%02x:%02x\n",
45 pci_domain_nr(slot->bus),
46 slot->bus->number,
47 slot->number);
48}
49
50static void pci_slot_release(struct kobject *kobj)
51{
52 struct pci_slot *slot = to_pci_slot(kobj);
53
54 pr_debug("%s: releasing pci_slot on %x:%d\n", __func__,
55 slot->bus->number, slot->number);
56
57 list_del(&slot->list);
58
59 kfree(slot);
60}
61
62static struct pci_slot_attribute pci_slot_attr_address =
63 __ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL);
64
65static struct attribute *pci_slot_default_attrs[] = {
66 &pci_slot_attr_address.attr,
67 NULL,
68};
69
70static struct kobj_type pci_slot_ktype = {
71 .sysfs_ops = &pci_slot_sysfs_ops,
72 .release = &pci_slot_release,
73 .default_attrs = pci_slot_default_attrs,
74};
75
76/**
77 * pci_create_slot - create or increment refcount for physical PCI slot
78 * @parent: struct pci_bus of parent bridge
79 * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder
80 * @name: user visible string presented in /sys/bus/pci/slots/<name>
81 *
82 * PCI slots have first class attributes such as address, speed, width,
83 * and a &struct pci_slot is used to manage them. This interface will
84 * either return a new &struct pci_slot to the caller, or if the pci_slot
85 * already exists, its refcount will be incremented.
86 *
87 * Slots are uniquely identified by a @pci_bus, @slot_nr, @name tuple.
88 *
89 * Placeholder slots:
90 * In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify
91 * a slot. There is one notable exception - pSeries (rpaphp), where the
92 * @slot_nr cannot be determined until a device is actually inserted into
93 * the slot. In this scenario, the caller may pass -1 for @slot_nr.
94 *
95 * The following semantics are imposed when the caller passes @slot_nr ==
96 * -1. First, the check for existing %struct pci_slot is skipped, as the
97 * caller may know about several unpopulated slots on a given %struct
98 * pci_bus, and each slot would have a @slot_nr of -1. Uniqueness for
99 * these slots is then determined by the @name parameter. We expect
100 * kobject_init_and_add() to warn us if the caller attempts to create
101 * multiple slots with the same name. The other change in semantics is
102 * user-visible, which is the 'address' parameter presented in sysfs will
103 * consist solely of a dddd:bb tuple, where dddd is the PCI domain of the
104 * %struct pci_bus and bb is the bus number. In other words, the devfn of
105 * the 'placeholder' slot will not be displayed.
106 */
107
108struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
109 const char *name)
110{
111 struct pci_slot *slot;
112 int err;
113
114 down_write(&pci_bus_sem);
115
116 if (slot_nr == -1)
117 goto placeholder;
118
119 /* If we've already created this slot, bump refcount and return. */
120 list_for_each_entry(slot, &parent->slots, list) {
121 if (slot->number == slot_nr) {
122 kobject_get(&slot->kobj);
123 pr_debug("%s: inc refcount to %d on %04x:%02x:%02x\n",
124 __func__,
125 atomic_read(&slot->kobj.kref.refcount),
126 pci_domain_nr(parent), parent->number,
127 slot_nr);
128 goto out;
129 }
130 }
131
132placeholder:
133 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
134 if (!slot) {
135 slot = ERR_PTR(-ENOMEM);
136 goto out;
137 }
138
139 slot->bus = parent;
140 slot->number = slot_nr;
141
142 slot->kobj.kset = pci_slots_kset;
143 err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
144 "%s", name);
145 if (err) {
146 printk(KERN_ERR "Unable to register kobject %s\n", name);
147 goto err;
148 }
149
150 INIT_LIST_HEAD(&slot->list);
151 list_add(&slot->list, &parent->slots);
152
153 /* Don't care if debug printk has a -1 for slot_nr */
154 pr_debug("%s: created pci_slot on %04x:%02x:%02x\n",
155 __func__, pci_domain_nr(parent), parent->number, slot_nr);
156
157 out:
158 up_write(&pci_bus_sem);
159 return slot;
160 err:
161 kfree(slot);
162 slot = ERR_PTR(err);
163 goto out;
164}
165EXPORT_SYMBOL_GPL(pci_create_slot);
166
167/**
168 * pci_update_slot_number - update %struct pci_slot -> number
169 * @slot - %struct pci_slot to update
170 * @slot_nr - new number for slot
171 *
172 * The primary purpose of this interface is to allow callers who earlier
173 * created a placeholder slot in pci_create_slot() by passing a -1 as
174 * slot_nr, to update their %struct pci_slot with the correct @slot_nr.
175 */
176
177void pci_update_slot_number(struct pci_slot *slot, int slot_nr)
178{
179 int name_count = 0;
180 struct pci_slot *tmp;
181
182 down_write(&pci_bus_sem);
183
184 list_for_each_entry(tmp, &slot->bus->slots, list) {
185 WARN_ON(tmp->number == slot_nr);
186 if (!strcmp(kobject_name(&tmp->kobj), kobject_name(&slot->kobj)))
187 name_count++;
188 }
189
190 if (name_count > 1)
191 printk(KERN_WARNING "pci_update_slot_number found %d slots with the same name: %s\n", name_count, kobject_name(&slot->kobj));
192
193 slot->number = slot_nr;
194 up_write(&pci_bus_sem);
195}
196EXPORT_SYMBOL_GPL(pci_update_slot_number);
197
198/**
199 * pci_destroy_slot - decrement refcount for physical PCI slot
200 * @slot: struct pci_slot to decrement
201 *
202 * %struct pci_slot is refcounted, so destroying them is really easy; we
203 * just call kobject_put on its kobj and let our release methods do the
204 * rest.
205 */
206
207void pci_destroy_slot(struct pci_slot *slot)
208{
209 pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__,
210 atomic_read(&slot->kobj.kref.refcount) - 1,
211 pci_domain_nr(slot->bus), slot->bus->number, slot->number);
212
213 down_write(&pci_bus_sem);
214 kobject_put(&slot->kobj);
215 up_write(&pci_bus_sem);
216}
217EXPORT_SYMBOL_GPL(pci_destroy_slot);
218
219static int pci_slot_init(void)
220{
221 struct kset *pci_bus_kset;
222
223 pci_bus_kset = bus_get_kset(&pci_bus_type);
224 pci_slots_kset = kset_create_and_add("slots", NULL,
225 &pci_bus_kset->kobj);
226 if (!pci_slots_kset) {
227 printk(KERN_ERR "PCI: Slot initialization failure\n");
228 return -ENOMEM;
229 }
230 return 0;
231}
232
233subsys_initcall(pci_slot_init);
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 9fcff0c33619..65129b54eb09 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -1490,7 +1490,7 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, unsigned
1490 ((tuple->TupleCode > 0x90) && (tuple->TupleCode < 0xff))) 1490 ((tuple->TupleCode > 0x90) && (tuple->TupleCode < 0xff)))
1491 reserved++; 1491 reserved++;
1492 } 1492 }
1493 if ((count) || (reserved > 5) || 1493 if ((count == MAX_TUPLES) || (reserved > 5) ||
1494 ((!dev_ok || !ident_ok) && (count > 10))) 1494 ((!dev_ok || !ident_ok) && (count > 10)))
1495 count = 0; 1495 count = 0;
1496 1496
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 52d0aa8c2e7a..c21f9a9c3e3f 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -29,9 +29,9 @@
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31#include <linux/vmalloc.h> 31#include <linux/vmalloc.h>
32#include <linux/of_platform.h>
32 33
33#include <pcmcia/ss.h> 34#include <pcmcia/ss.h>
34#include <asm/of_platform.h>
35 35
36static const char driver_name[] = "electra-cf"; 36static const char driver_name[] = "electra-cf";
37 37
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 13a5fbd50a07..ff66604e90d4 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -49,6 +49,8 @@
49#include <linux/interrupt.h> 49#include <linux/interrupt.h>
50#include <linux/fsl_devices.h> 50#include <linux/fsl_devices.h>
51#include <linux/bitops.h> 51#include <linux/bitops.h>
52#include <linux/of_device.h>
53#include <linux/of_platform.h>
52 54
53#include <asm/io.h> 55#include <asm/io.h>
54#include <asm/system.h> 56#include <asm/system.h>
@@ -57,8 +59,6 @@
57#include <asm/8xx_immap.h> 59#include <asm/8xx_immap.h>
58#include <asm/irq.h> 60#include <asm/irq.h>
59#include <asm/fs_pd.h> 61#include <asm/fs_pd.h>
60#include <asm/of_device.h>
61#include <asm/of_platform.h>
62 62
63#include <pcmcia/cs_types.h> 63#include <pcmcia/cs_types.h>
64#include <pcmcia/cs.h> 64#include <pcmcia/cs.h>
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index 886dac823ed6..e3fa9a2d9a3d 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -1,3 +1,8 @@
1/*
2 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
3 * Bjorn Helgaas <bjorn.helgaas@hp.com>
4 */
5
1extern spinlock_t pnp_lock; 6extern spinlock_t pnp_lock;
2void *pnp_alloc(long size); 7void *pnp_alloc(long size);
3 8
@@ -19,22 +24,118 @@ void pnp_remove_card(struct pnp_card *card);
19int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev); 24int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev);
20void pnp_remove_card_device(struct pnp_dev *dev); 25void pnp_remove_card_device(struct pnp_dev *dev);
21 26
22struct pnp_option *pnp_build_option(int priority); 27struct pnp_port {
23struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev); 28 resource_size_t min; /* min base number */
24struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev, 29 resource_size_t max; /* max base number */
25 int priority); 30 resource_size_t align; /* align boundary */
26int pnp_register_irq_resource(struct pnp_dev *dev, struct pnp_option *option, 31 resource_size_t size; /* size of range */
27 struct pnp_irq *data); 32 unsigned char flags; /* port flags */
28int pnp_register_dma_resource(struct pnp_dev *dev, struct pnp_option *option, 33};
29 struct pnp_dma *data); 34
30int pnp_register_port_resource(struct pnp_dev *dev, struct pnp_option *option, 35#define PNP_IRQ_NR 256
31 struct pnp_port *data); 36typedef struct { DECLARE_BITMAP(bits, PNP_IRQ_NR); } pnp_irq_mask_t;
32int pnp_register_mem_resource(struct pnp_dev *dev, struct pnp_option *option, 37
33 struct pnp_mem *data); 38struct pnp_irq {
39 pnp_irq_mask_t map; /* bitmap for IRQ lines */
40 unsigned char flags; /* IRQ flags */
41};
42
43struct pnp_dma {
44 unsigned char map; /* bitmask for DMA channels */
45 unsigned char flags; /* DMA flags */
46};
47
48struct pnp_mem {
49 resource_size_t min; /* min base number */
50 resource_size_t max; /* max base number */
51 resource_size_t align; /* align boundary */
52 resource_size_t size; /* size of range */
53 unsigned char flags; /* memory flags */
54};
55
56#define PNP_OPTION_DEPENDENT 0x80000000
57#define PNP_OPTION_SET_MASK 0xffff
58#define PNP_OPTION_SET_SHIFT 12
59#define PNP_OPTION_PRIORITY_MASK 0xfff
60#define PNP_OPTION_PRIORITY_SHIFT 0
61
62#define PNP_RES_PRIORITY_PREFERRED 0
63#define PNP_RES_PRIORITY_ACCEPTABLE 1
64#define PNP_RES_PRIORITY_FUNCTIONAL 2
65#define PNP_RES_PRIORITY_INVALID PNP_OPTION_PRIORITY_MASK
66
67struct pnp_option {
68 struct list_head list;
69 unsigned int flags; /* independent/dependent, set, priority */
70
71 unsigned long type; /* IORESOURCE_{IO,MEM,IRQ,DMA} */
72 union {
73 struct pnp_port port;
74 struct pnp_irq irq;
75 struct pnp_dma dma;
76 struct pnp_mem mem;
77 } u;
78};
79
80int pnp_register_irq_resource(struct pnp_dev *dev, unsigned int option_flags,
81 pnp_irq_mask_t *map, unsigned char flags);
82int pnp_register_dma_resource(struct pnp_dev *dev, unsigned int option_flags,
83 unsigned char map, unsigned char flags);
84int pnp_register_port_resource(struct pnp_dev *dev, unsigned int option_flags,
85 resource_size_t min, resource_size_t max,
86 resource_size_t align, resource_size_t size,
87 unsigned char flags);
88int pnp_register_mem_resource(struct pnp_dev *dev, unsigned int option_flags,
89 resource_size_t min, resource_size_t max,
90 resource_size_t align, resource_size_t size,
91 unsigned char flags);
92
93static inline int pnp_option_is_dependent(struct pnp_option *option)
94{
95 return option->flags & PNP_OPTION_DEPENDENT ? 1 : 0;
96}
97
98static inline unsigned int pnp_option_set(struct pnp_option *option)
99{
100 return (option->flags >> PNP_OPTION_SET_SHIFT) & PNP_OPTION_SET_MASK;
101}
102
103static inline unsigned int pnp_option_priority(struct pnp_option *option)
104{
105 return (option->flags >> PNP_OPTION_PRIORITY_SHIFT) &
106 PNP_OPTION_PRIORITY_MASK;
107}
108
109static inline unsigned int pnp_new_dependent_set(struct pnp_dev *dev,
110 int priority)
111{
112 unsigned int flags;
113
114 if (priority > PNP_RES_PRIORITY_FUNCTIONAL) {
115 dev_warn(&dev->dev, "invalid dependent option priority %d "
116 "clipped to %d", priority,
117 PNP_RES_PRIORITY_INVALID);
118 priority = PNP_RES_PRIORITY_INVALID;
119 }
120
121 flags = PNP_OPTION_DEPENDENT |
122 ((dev->num_dependent_sets & PNP_OPTION_SET_MASK) <<
123 PNP_OPTION_SET_SHIFT) |
124 ((priority & PNP_OPTION_PRIORITY_MASK) <<
125 PNP_OPTION_PRIORITY_SHIFT);
126
127 dev->num_dependent_sets++;
128
129 return flags;
130}
131
132char *pnp_option_priority_name(struct pnp_option *option);
133void dbg_pnp_show_option(struct pnp_dev *dev, struct pnp_option *option);
134
34void pnp_init_resources(struct pnp_dev *dev); 135void pnp_init_resources(struct pnp_dev *dev);
35 136
36void pnp_fixup_device(struct pnp_dev *dev); 137void pnp_fixup_device(struct pnp_dev *dev);
37void pnp_free_option(struct pnp_option *option); 138void pnp_free_options(struct pnp_dev *dev);
38int __pnp_add_device(struct pnp_dev *dev); 139int __pnp_add_device(struct pnp_dev *dev);
39void __pnp_remove_device(struct pnp_dev *dev); 140void __pnp_remove_device(struct pnp_dev *dev);
40 141
@@ -43,29 +144,18 @@ int pnp_check_mem(struct pnp_dev *dev, struct resource *res);
43int pnp_check_irq(struct pnp_dev *dev, struct resource *res); 144int pnp_check_irq(struct pnp_dev *dev, struct resource *res);
44int pnp_check_dma(struct pnp_dev *dev, struct resource *res); 145int pnp_check_dma(struct pnp_dev *dev, struct resource *res);
45 146
147char *pnp_resource_type_name(struct resource *res);
46void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc); 148void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc);
47 149
48void pnp_init_resource(struct resource *res); 150void pnp_free_resources(struct pnp_dev *dev);
49 151int pnp_resource_type(struct resource *res);
50struct pnp_resource *pnp_get_pnp_resource(struct pnp_dev *dev,
51 unsigned int type, unsigned int num);
52
53#define PNP_MAX_PORT 40
54#define PNP_MAX_MEM 24
55#define PNP_MAX_IRQ 2
56#define PNP_MAX_DMA 2
57 152
58struct pnp_resource { 153struct pnp_resource {
154 struct list_head list;
59 struct resource res; 155 struct resource res;
60 unsigned int index; /* ISAPNP config register index */
61}; 156};
62 157
63struct pnp_resource_table { 158void pnp_free_resource(struct pnp_resource *pnp_res);
64 struct pnp_resource port[PNP_MAX_PORT];
65 struct pnp_resource mem[PNP_MAX_MEM];
66 struct pnp_resource dma[PNP_MAX_DMA];
67 struct pnp_resource irq[PNP_MAX_IRQ];
68};
69 159
70struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, 160struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
71 int flags); 161 int flags);
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 20771b7d4482..a411582bcd72 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -99,14 +99,28 @@ static void pnp_free_ids(struct pnp_dev *dev)
99 } 99 }
100} 100}
101 101
102void pnp_free_resource(struct pnp_resource *pnp_res)
103{
104 list_del(&pnp_res->list);
105 kfree(pnp_res);
106}
107
108void pnp_free_resources(struct pnp_dev *dev)
109{
110 struct pnp_resource *pnp_res, *tmp;
111
112 list_for_each_entry_safe(pnp_res, tmp, &dev->resources, list) {
113 pnp_free_resource(pnp_res);
114 }
115}
116
102static void pnp_release_device(struct device *dmdev) 117static void pnp_release_device(struct device *dmdev)
103{ 118{
104 struct pnp_dev *dev = to_pnp_dev(dmdev); 119 struct pnp_dev *dev = to_pnp_dev(dmdev);
105 120
106 pnp_free_option(dev->independent);
107 pnp_free_option(dev->dependent);
108 pnp_free_ids(dev); 121 pnp_free_ids(dev);
109 kfree(dev->res); 122 pnp_free_resources(dev);
123 pnp_free_options(dev);
110 kfree(dev); 124 kfree(dev);
111} 125}
112 126
@@ -119,12 +133,8 @@ struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, char *pnpid
119 if (!dev) 133 if (!dev)
120 return NULL; 134 return NULL;
121 135
122 dev->res = kzalloc(sizeof(struct pnp_resource_table), GFP_KERNEL); 136 INIT_LIST_HEAD(&dev->resources);
123 if (!dev->res) { 137 INIT_LIST_HEAD(&dev->options);
124 kfree(dev);
125 return NULL;
126 }
127
128 dev->protocol = protocol; 138 dev->protocol = protocol;
129 dev->number = id; 139 dev->number = id;
130 dev->dma_mask = DMA_24BIT_MASK; 140 dev->dma_mask = DMA_24BIT_MASK;
@@ -140,7 +150,6 @@ struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, char *pnpid
140 150
141 dev_id = pnp_add_id(dev, pnpid); 151 dev_id = pnp_add_id(dev, pnpid);
142 if (!dev_id) { 152 if (!dev_id) {
143 kfree(dev->res);
144 kfree(dev); 153 kfree(dev);
145 return NULL; 154 return NULL;
146 } 155 }
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 5695a79f3a52..a876ecf7028c 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -3,6 +3,8 @@
3 * 3 *
4 * Some code, especially possible resource dumping is based on isapnp_proc.c (c) Jaroslav Kysela <perex@perex.cz> 4 * Some code, especially possible resource dumping is based on isapnp_proc.c (c) Jaroslav Kysela <perex@perex.cz>
5 * Copyright 2002 Adam Belay <ambx1@neo.rr.com> 5 * Copyright 2002 Adam Belay <ambx1@neo.rr.com>
6 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
7 * Bjorn Helgaas <bjorn.helgaas@hp.com>
6 */ 8 */
7 9
8#include <linux/pnp.h> 10#include <linux/pnp.h>
@@ -53,11 +55,13 @@ static int pnp_printf(pnp_info_buffer_t * buffer, char *fmt, ...)
53static void pnp_print_port(pnp_info_buffer_t * buffer, char *space, 55static void pnp_print_port(pnp_info_buffer_t * buffer, char *space,
54 struct pnp_port *port) 56 struct pnp_port *port)
55{ 57{
56 pnp_printf(buffer, 58 pnp_printf(buffer, "%sport %#llx-%#llx, align %#llx, size %#llx, "
57 "%sport 0x%x-0x%x, align 0x%x, size 0x%x, %i-bit address decoding\n", 59 "%i-bit address decoding\n", space,
58 space, port->min, port->max, 60 (unsigned long long) port->min,
59 port->align ? (port->align - 1) : 0, port->size, 61 (unsigned long long) port->max,
60 port->flags & PNP_PORT_FLAG_16BITADDR ? 16 : 10); 62 port->align ? ((unsigned long long) port->align - 1) : 0,
63 (unsigned long long) port->size,
64 port->flags & IORESOURCE_IO_16BIT_ADDR ? 16 : 10);
61} 65}
62 66
63static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space, 67static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space,
@@ -67,7 +71,7 @@ static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space,
67 71
68 pnp_printf(buffer, "%sirq ", space); 72 pnp_printf(buffer, "%sirq ", space);
69 for (i = 0; i < PNP_IRQ_NR; i++) 73 for (i = 0; i < PNP_IRQ_NR; i++)
70 if (test_bit(i, irq->map)) { 74 if (test_bit(i, irq->map.bits)) {
71 if (!first) { 75 if (!first) {
72 pnp_printf(buffer, ","); 76 pnp_printf(buffer, ",");
73 } else { 77 } else {
@@ -78,7 +82,7 @@ static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space,
78 else 82 else
79 pnp_printf(buffer, "%i", i); 83 pnp_printf(buffer, "%i", i);
80 } 84 }
81 if (bitmap_empty(irq->map, PNP_IRQ_NR)) 85 if (bitmap_empty(irq->map.bits, PNP_IRQ_NR))
82 pnp_printf(buffer, "<none>"); 86 pnp_printf(buffer, "<none>");
83 if (irq->flags & IORESOURCE_IRQ_HIGHEDGE) 87 if (irq->flags & IORESOURCE_IRQ_HIGHEDGE)
84 pnp_printf(buffer, " High-Edge"); 88 pnp_printf(buffer, " High-Edge");
@@ -88,6 +92,8 @@ static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space,
88 pnp_printf(buffer, " High-Level"); 92 pnp_printf(buffer, " High-Level");
89 if (irq->flags & IORESOURCE_IRQ_LOWLEVEL) 93 if (irq->flags & IORESOURCE_IRQ_LOWLEVEL)
90 pnp_printf(buffer, " Low-Level"); 94 pnp_printf(buffer, " Low-Level");
95 if (irq->flags & IORESOURCE_IRQ_OPTIONAL)
96 pnp_printf(buffer, " (optional)");
91 pnp_printf(buffer, "\n"); 97 pnp_printf(buffer, "\n");
92} 98}
93 99
@@ -148,8 +154,11 @@ static void pnp_print_mem(pnp_info_buffer_t * buffer, char *space,
148{ 154{
149 char *s; 155 char *s;
150 156
151 pnp_printf(buffer, "%sMemory 0x%x-0x%x, align 0x%x, size 0x%x", 157 pnp_printf(buffer, "%sMemory %#llx-%#llx, align %#llx, size %#llx",
152 space, mem->min, mem->max, mem->align, mem->size); 158 space, (unsigned long long) mem->min,
159 (unsigned long long) mem->max,
160 (unsigned long long) mem->align,
161 (unsigned long long) mem->size);
153 if (mem->flags & IORESOURCE_MEM_WRITEABLE) 162 if (mem->flags & IORESOURCE_MEM_WRITEABLE)
154 pnp_printf(buffer, ", writeable"); 163 pnp_printf(buffer, ", writeable");
155 if (mem->flags & IORESOURCE_MEM_CACHEABLE) 164 if (mem->flags & IORESOURCE_MEM_CACHEABLE)
@@ -177,65 +186,58 @@ static void pnp_print_mem(pnp_info_buffer_t * buffer, char *space,
177} 186}
178 187
179static void pnp_print_option(pnp_info_buffer_t * buffer, char *space, 188static void pnp_print_option(pnp_info_buffer_t * buffer, char *space,
180 struct pnp_option *option, int dep) 189 struct pnp_option *option)
181{ 190{
182 char *s; 191 switch (option->type) {
183 struct pnp_port *port; 192 case IORESOURCE_IO:
184 struct pnp_irq *irq; 193 pnp_print_port(buffer, space, &option->u.port);
185 struct pnp_dma *dma; 194 break;
186 struct pnp_mem *mem; 195 case IORESOURCE_MEM:
187 196 pnp_print_mem(buffer, space, &option->u.mem);
188 if (dep) { 197 break;
189 switch (option->priority) { 198 case IORESOURCE_IRQ:
190 case PNP_RES_PRIORITY_PREFERRED: 199 pnp_print_irq(buffer, space, &option->u.irq);
191 s = "preferred"; 200 break;
192 break; 201 case IORESOURCE_DMA:
193 case PNP_RES_PRIORITY_ACCEPTABLE: 202 pnp_print_dma(buffer, space, &option->u.dma);
194 s = "acceptable"; 203 break;
195 break;
196 case PNP_RES_PRIORITY_FUNCTIONAL:
197 s = "functional";
198 break;
199 default:
200 s = "invalid";
201 }
202 pnp_printf(buffer, "Dependent: %02i - Priority %s\n", dep, s);
203 } 204 }
204
205 for (port = option->port; port; port = port->next)
206 pnp_print_port(buffer, space, port);
207 for (irq = option->irq; irq; irq = irq->next)
208 pnp_print_irq(buffer, space, irq);
209 for (dma = option->dma; dma; dma = dma->next)
210 pnp_print_dma(buffer, space, dma);
211 for (mem = option->mem; mem; mem = mem->next)
212 pnp_print_mem(buffer, space, mem);
213} 205}
214 206
215static ssize_t pnp_show_options(struct device *dmdev, 207static ssize_t pnp_show_options(struct device *dmdev,
216 struct device_attribute *attr, char *buf) 208 struct device_attribute *attr, char *buf)
217{ 209{
218 struct pnp_dev *dev = to_pnp_dev(dmdev); 210 struct pnp_dev *dev = to_pnp_dev(dmdev);
219 struct pnp_option *independent = dev->independent; 211 pnp_info_buffer_t *buffer;
220 struct pnp_option *dependent = dev->dependent; 212 struct pnp_option *option;
221 int ret, dep = 1; 213 int ret, dep = 0, set = 0;
214 char *indent;
222 215
223 pnp_info_buffer_t *buffer = (pnp_info_buffer_t *) 216 buffer = pnp_alloc(sizeof(pnp_info_buffer_t));
224 pnp_alloc(sizeof(pnp_info_buffer_t));
225 if (!buffer) 217 if (!buffer)
226 return -ENOMEM; 218 return -ENOMEM;
227 219
228 buffer->len = PAGE_SIZE; 220 buffer->len = PAGE_SIZE;
229 buffer->buffer = buf; 221 buffer->buffer = buf;
230 buffer->curr = buffer->buffer; 222 buffer->curr = buffer->buffer;
231 if (independent)
232 pnp_print_option(buffer, "", independent, 0);
233 223
234 while (dependent) { 224 list_for_each_entry(option, &dev->options, list) {
235 pnp_print_option(buffer, " ", dependent, dep); 225 if (pnp_option_is_dependent(option)) {
236 dependent = dependent->next; 226 indent = " ";
237 dep++; 227 if (!dep || pnp_option_set(option) != set) {
228 set = pnp_option_set(option);
229 dep = 1;
230 pnp_printf(buffer, "Dependent: %02i - "
231 "Priority %s\n", set,
232 pnp_option_priority_name(option));
233 }
234 } else {
235 dep = 0;
236 indent = "";
237 }
238 pnp_print_option(buffer, indent, option);
238 } 239 }
240
239 ret = (buffer->curr - buf); 241 ret = (buffer->curr - buf);
240 kfree(buffer); 242 kfree(buffer);
241 return ret; 243 return ret;
@@ -248,79 +250,59 @@ static ssize_t pnp_show_current_resources(struct device *dmdev,
248 char *buf) 250 char *buf)
249{ 251{
250 struct pnp_dev *dev = to_pnp_dev(dmdev); 252 struct pnp_dev *dev = to_pnp_dev(dmdev);
251 struct resource *res;
252 int i, ret;
253 pnp_info_buffer_t *buffer; 253 pnp_info_buffer_t *buffer;
254 struct pnp_resource *pnp_res;
255 struct resource *res;
256 int ret;
254 257
255 if (!dev) 258 if (!dev)
256 return -EINVAL; 259 return -EINVAL;
257 260
258 buffer = (pnp_info_buffer_t *) pnp_alloc(sizeof(pnp_info_buffer_t)); 261 buffer = pnp_alloc(sizeof(pnp_info_buffer_t));
259 if (!buffer) 262 if (!buffer)
260 return -ENOMEM; 263 return -ENOMEM;
264
261 buffer->len = PAGE_SIZE; 265 buffer->len = PAGE_SIZE;
262 buffer->buffer = buf; 266 buffer->buffer = buf;
263 buffer->curr = buffer->buffer; 267 buffer->curr = buffer->buffer;
264 268
265 pnp_printf(buffer, "state = "); 269 pnp_printf(buffer, "state = %s\n", dev->active ? "active" : "disabled");
266 if (dev->active) 270
267 pnp_printf(buffer, "active\n"); 271 list_for_each_entry(pnp_res, &dev->resources, list) {
268 else 272 res = &pnp_res->res;
269 pnp_printf(buffer, "disabled\n"); 273
270 274 pnp_printf(buffer, pnp_resource_type_name(res));
271 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) { 275
272 if (pnp_resource_valid(res)) { 276 if (res->flags & IORESOURCE_DISABLED) {
273 pnp_printf(buffer, "io"); 277 pnp_printf(buffer, " disabled\n");
274 if (res->flags & IORESOURCE_DISABLED) 278 continue;
275 pnp_printf(buffer, " disabled\n");
276 else
277 pnp_printf(buffer, " 0x%llx-0x%llx\n",
278 (unsigned long long) res->start,
279 (unsigned long long) res->end);
280 }
281 }
282 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) {
283 if (pnp_resource_valid(res)) {
284 pnp_printf(buffer, "mem");
285 if (res->flags & IORESOURCE_DISABLED)
286 pnp_printf(buffer, " disabled\n");
287 else
288 pnp_printf(buffer, " 0x%llx-0x%llx\n",
289 (unsigned long long) res->start,
290 (unsigned long long) res->end);
291 }
292 }
293 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IRQ, i)); i++) {
294 if (pnp_resource_valid(res)) {
295 pnp_printf(buffer, "irq");
296 if (res->flags & IORESOURCE_DISABLED)
297 pnp_printf(buffer, " disabled\n");
298 else
299 pnp_printf(buffer, " %lld\n",
300 (unsigned long long) res->start);
301 } 279 }
302 } 280
303 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_DMA, i)); i++) { 281 switch (pnp_resource_type(res)) {
304 if (pnp_resource_valid(res)) { 282 case IORESOURCE_IO:
305 pnp_printf(buffer, "dma"); 283 case IORESOURCE_MEM:
306 if (res->flags & IORESOURCE_DISABLED) 284 pnp_printf(buffer, " %#llx-%#llx\n",
307 pnp_printf(buffer, " disabled\n"); 285 (unsigned long long) res->start,
308 else 286 (unsigned long long) res->end);
309 pnp_printf(buffer, " %lld\n", 287 break;
310 (unsigned long long) res->start); 288 case IORESOURCE_IRQ:
289 case IORESOURCE_DMA:
290 pnp_printf(buffer, " %lld\n",
291 (unsigned long long) res->start);
292 break;
311 } 293 }
312 } 294 }
295
313 ret = (buffer->curr - buf); 296 ret = (buffer->curr - buf);
314 kfree(buffer); 297 kfree(buffer);
315 return ret; 298 return ret;
316} 299}
317 300
318static ssize_t 301static ssize_t pnp_set_current_resources(struct device *dmdev,
319pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr, 302 struct device_attribute *attr,
320 const char *ubuf, size_t count) 303 const char *ubuf, size_t count)
321{ 304{
322 struct pnp_dev *dev = to_pnp_dev(dmdev); 305 struct pnp_dev *dev = to_pnp_dev(dmdev);
323 struct pnp_resource *pnp_res;
324 char *buf = (void *)ubuf; 306 char *buf = (void *)ubuf;
325 int retval = 0; 307 int retval = 0;
326 resource_size_t start, end; 308 resource_size_t start, end;
@@ -368,7 +350,6 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
368 goto done; 350 goto done;
369 } 351 }
370 if (!strnicmp(buf, "set", 3)) { 352 if (!strnicmp(buf, "set", 3)) {
371 int nport = 0, nmem = 0, nirq = 0, ndma = 0;
372 if (dev->active) 353 if (dev->active)
373 goto done; 354 goto done;
374 buf += 3; 355 buf += 3;
@@ -391,10 +372,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
391 end = simple_strtoul(buf, &buf, 0); 372 end = simple_strtoul(buf, &buf, 0);
392 } else 373 } else
393 end = start; 374 end = start;
394 pnp_res = pnp_add_io_resource(dev, start, end, 375 pnp_add_io_resource(dev, start, end, 0);
395 0);
396 if (pnp_res)
397 pnp_res->index = nport++;
398 continue; 376 continue;
399 } 377 }
400 if (!strnicmp(buf, "mem", 3)) { 378 if (!strnicmp(buf, "mem", 3)) {
@@ -411,10 +389,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
411 end = simple_strtoul(buf, &buf, 0); 389 end = simple_strtoul(buf, &buf, 0);
412 } else 390 } else
413 end = start; 391 end = start;
414 pnp_res = pnp_add_mem_resource(dev, start, end, 392 pnp_add_mem_resource(dev, start, end, 0);
415 0);
416 if (pnp_res)
417 pnp_res->index = nmem++;
418 continue; 393 continue;
419 } 394 }
420 if (!strnicmp(buf, "irq", 3)) { 395 if (!strnicmp(buf, "irq", 3)) {
@@ -422,9 +397,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
422 while (isspace(*buf)) 397 while (isspace(*buf))
423 ++buf; 398 ++buf;
424 start = simple_strtoul(buf, &buf, 0); 399 start = simple_strtoul(buf, &buf, 0);
425 pnp_res = pnp_add_irq_resource(dev, start, 0); 400 pnp_add_irq_resource(dev, start, 0);
426 if (pnp_res)
427 pnp_res->index = nirq++;
428 continue; 401 continue;
429 } 402 }
430 if (!strnicmp(buf, "dma", 3)) { 403 if (!strnicmp(buf, "dma", 3)) {
@@ -432,9 +405,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
432 while (isspace(*buf)) 405 while (isspace(*buf))
433 ++buf; 406 ++buf;
434 start = simple_strtoul(buf, &buf, 0); 407 start = simple_strtoul(buf, &buf, 0);
435 pnp_res = pnp_add_dma_resource(dev, start, 0); 408 pnp_add_dma_resource(dev, start, 0);
436 if (pnp_res)
437 pnp_res->index = ndma++;
438 continue; 409 continue;
439 } 410 }
440 break; 411 break;
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index f1bccdbdeb08..101a835e8759 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -429,154 +429,135 @@ static struct pnp_dev *__init isapnp_parse_device(struct pnp_card *card,
429 * Add IRQ resource to resources list. 429 * Add IRQ resource to resources list.
430 */ 430 */
431static void __init isapnp_parse_irq_resource(struct pnp_dev *dev, 431static void __init isapnp_parse_irq_resource(struct pnp_dev *dev,
432 struct pnp_option *option, 432 unsigned int option_flags,
433 int size) 433 int size)
434{ 434{
435 unsigned char tmp[3]; 435 unsigned char tmp[3];
436 struct pnp_irq *irq;
437 unsigned long bits; 436 unsigned long bits;
437 pnp_irq_mask_t map;
438 unsigned char flags = IORESOURCE_IRQ_HIGHEDGE;
438 439
439 isapnp_peek(tmp, size); 440 isapnp_peek(tmp, size);
440 irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL);
441 if (!irq)
442 return;
443 bits = (tmp[1] << 8) | tmp[0]; 441 bits = (tmp[1] << 8) | tmp[0];
444 bitmap_copy(irq->map, &bits, 16); 442
443 bitmap_zero(map.bits, PNP_IRQ_NR);
444 bitmap_copy(map.bits, &bits, 16);
445
445 if (size > 2) 446 if (size > 2)
446 irq->flags = tmp[2]; 447 flags = tmp[2];
447 else 448
448 irq->flags = IORESOURCE_IRQ_HIGHEDGE; 449 pnp_register_irq_resource(dev, option_flags, &map, flags);
449 pnp_register_irq_resource(dev, option, irq);
450} 450}
451 451
452/* 452/*
453 * Add DMA resource to resources list. 453 * Add DMA resource to resources list.
454 */ 454 */
455static void __init isapnp_parse_dma_resource(struct pnp_dev *dev, 455static void __init isapnp_parse_dma_resource(struct pnp_dev *dev,
456 struct pnp_option *option, 456 unsigned int option_flags,
457 int size) 457 int size)
458{ 458{
459 unsigned char tmp[2]; 459 unsigned char tmp[2];
460 struct pnp_dma *dma;
461 460
462 isapnp_peek(tmp, size); 461 isapnp_peek(tmp, size);
463 dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL); 462 pnp_register_dma_resource(dev, option_flags, tmp[0], tmp[1]);
464 if (!dma)
465 return;
466 dma->map = tmp[0];
467 dma->flags = tmp[1];
468 pnp_register_dma_resource(dev, option, dma);
469} 463}
470 464
471/* 465/*
472 * Add port resource to resources list. 466 * Add port resource to resources list.
473 */ 467 */
474static void __init isapnp_parse_port_resource(struct pnp_dev *dev, 468static void __init isapnp_parse_port_resource(struct pnp_dev *dev,
475 struct pnp_option *option, 469 unsigned int option_flags,
476 int size) 470 int size)
477{ 471{
478 unsigned char tmp[7]; 472 unsigned char tmp[7];
479 struct pnp_port *port; 473 resource_size_t min, max, align, len;
474 unsigned char flags;
480 475
481 isapnp_peek(tmp, size); 476 isapnp_peek(tmp, size);
482 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); 477 min = (tmp[2] << 8) | tmp[1];
483 if (!port) 478 max = (tmp[4] << 8) | tmp[3];
484 return; 479 align = tmp[5];
485 port->min = (tmp[2] << 8) | tmp[1]; 480 len = tmp[6];
486 port->max = (tmp[4] << 8) | tmp[3]; 481 flags = tmp[0] ? IORESOURCE_IO_16BIT_ADDR : 0;
487 port->align = tmp[5]; 482 pnp_register_port_resource(dev, option_flags,
488 port->size = tmp[6]; 483 min, max, align, len, flags);
489 port->flags = tmp[0] ? PNP_PORT_FLAG_16BITADDR : 0;
490 pnp_register_port_resource(dev, option, port);
491} 484}
492 485
493/* 486/*
494 * Add fixed port resource to resources list. 487 * Add fixed port resource to resources list.
495 */ 488 */
496static void __init isapnp_parse_fixed_port_resource(struct pnp_dev *dev, 489static void __init isapnp_parse_fixed_port_resource(struct pnp_dev *dev,
497 struct pnp_option *option, 490 unsigned int option_flags,
498 int size) 491 int size)
499{ 492{
500 unsigned char tmp[3]; 493 unsigned char tmp[3];
501 struct pnp_port *port; 494 resource_size_t base, len;
502 495
503 isapnp_peek(tmp, size); 496 isapnp_peek(tmp, size);
504 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); 497 base = (tmp[1] << 8) | tmp[0];
505 if (!port) 498 len = tmp[2];
506 return; 499 pnp_register_port_resource(dev, option_flags, base, base, 0, len,
507 port->min = port->max = (tmp[1] << 8) | tmp[0]; 500 IORESOURCE_IO_FIXED);
508 port->size = tmp[2];
509 port->align = 0;
510 port->flags = PNP_PORT_FLAG_FIXED;
511 pnp_register_port_resource(dev, option, port);
512} 501}
513 502
514/* 503/*
515 * Add memory resource to resources list. 504 * Add memory resource to resources list.
516 */ 505 */
517static void __init isapnp_parse_mem_resource(struct pnp_dev *dev, 506static void __init isapnp_parse_mem_resource(struct pnp_dev *dev,
518 struct pnp_option *option, 507 unsigned int option_flags,
519 int size) 508 int size)
520{ 509{
521 unsigned char tmp[9]; 510 unsigned char tmp[9];
522 struct pnp_mem *mem; 511 resource_size_t min, max, align, len;
512 unsigned char flags;
523 513
524 isapnp_peek(tmp, size); 514 isapnp_peek(tmp, size);
525 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); 515 min = ((tmp[2] << 8) | tmp[1]) << 8;
526 if (!mem) 516 max = ((tmp[4] << 8) | tmp[3]) << 8;
527 return; 517 align = (tmp[6] << 8) | tmp[5];
528 mem->min = ((tmp[2] << 8) | tmp[1]) << 8; 518 len = ((tmp[8] << 8) | tmp[7]) << 8;
529 mem->max = ((tmp[4] << 8) | tmp[3]) << 8; 519 flags = tmp[0];
530 mem->align = (tmp[6] << 8) | tmp[5]; 520 pnp_register_mem_resource(dev, option_flags,
531 mem->size = ((tmp[8] << 8) | tmp[7]) << 8; 521 min, max, align, len, flags);
532 mem->flags = tmp[0];
533 pnp_register_mem_resource(dev, option, mem);
534} 522}
535 523
536/* 524/*
537 * Add 32-bit memory resource to resources list. 525 * Add 32-bit memory resource to resources list.
538 */ 526 */
539static void __init isapnp_parse_mem32_resource(struct pnp_dev *dev, 527static void __init isapnp_parse_mem32_resource(struct pnp_dev *dev,
540 struct pnp_option *option, 528 unsigned int option_flags,
541 int size) 529 int size)
542{ 530{
543 unsigned char tmp[17]; 531 unsigned char tmp[17];
544 struct pnp_mem *mem; 532 resource_size_t min, max, align, len;
533 unsigned char flags;
545 534
546 isapnp_peek(tmp, size); 535 isapnp_peek(tmp, size);
547 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); 536 min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
548 if (!mem) 537 max = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5];
549 return; 538 align = (tmp[12] << 24) | (tmp[11] << 16) | (tmp[10] << 8) | tmp[9];
550 mem->min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; 539 len = (tmp[16] << 24) | (tmp[15] << 16) | (tmp[14] << 8) | tmp[13];
551 mem->max = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5]; 540 flags = tmp[0];
552 mem->align = 541 pnp_register_mem_resource(dev, option_flags,
553 (tmp[12] << 24) | (tmp[11] << 16) | (tmp[10] << 8) | tmp[9]; 542 min, max, align, len, flags);
554 mem->size =
555 (tmp[16] << 24) | (tmp[15] << 16) | (tmp[14] << 8) | tmp[13];
556 mem->flags = tmp[0];
557 pnp_register_mem_resource(dev, option, mem);
558} 543}
559 544
560/* 545/*
561 * Add 32-bit fixed memory resource to resources list. 546 * Add 32-bit fixed memory resource to resources list.
562 */ 547 */
563static void __init isapnp_parse_fixed_mem32_resource(struct pnp_dev *dev, 548static void __init isapnp_parse_fixed_mem32_resource(struct pnp_dev *dev,
564 struct pnp_option *option, 549 unsigned int option_flags,
565 int size) 550 int size)
566{ 551{
567 unsigned char tmp[9]; 552 unsigned char tmp[9];
568 struct pnp_mem *mem; 553 resource_size_t base, len;
554 unsigned char flags;
569 555
570 isapnp_peek(tmp, size); 556 isapnp_peek(tmp, size);
571 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); 557 base = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
572 if (!mem) 558 len = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5];
573 return; 559 flags = tmp[0];
574 mem->min = mem->max = 560 pnp_register_mem_resource(dev, option_flags, base, base, 0, len, flags);
575 (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
576 mem->size = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5];
577 mem->align = 0;
578 mem->flags = tmp[0];
579 pnp_register_mem_resource(dev, option, mem);
580} 561}
581 562
582/* 563/*
@@ -604,20 +585,16 @@ isapnp_parse_name(char *name, unsigned int name_max, unsigned short *size)
604static int __init isapnp_create_device(struct pnp_card *card, 585static int __init isapnp_create_device(struct pnp_card *card,
605 unsigned short size) 586 unsigned short size)
606{ 587{
607 int number = 0, skip = 0, priority = 0, compat = 0; 588 int number = 0, skip = 0, priority, compat = 0;
608 unsigned char type, tmp[17]; 589 unsigned char type, tmp[17];
609 struct pnp_option *option; 590 unsigned int option_flags;
610 struct pnp_dev *dev; 591 struct pnp_dev *dev;
611 u32 eisa_id; 592 u32 eisa_id;
612 char id[8]; 593 char id[8];
613 594
614 if ((dev = isapnp_parse_device(card, size, number++)) == NULL) 595 if ((dev = isapnp_parse_device(card, size, number++)) == NULL)
615 return 1; 596 return 1;
616 option = pnp_register_independent_option(dev); 597 option_flags = 0;
617 if (!option) {
618 kfree(dev);
619 return 1;
620 }
621 pnp_add_card_device(card, dev); 598 pnp_add_card_device(card, dev);
622 599
623 while (1) { 600 while (1) {
@@ -634,16 +611,11 @@ static int __init isapnp_create_device(struct pnp_card *card,
634 return 1; 611 return 1;
635 size = 0; 612 size = 0;
636 skip = 0; 613 skip = 0;
637 option = pnp_register_independent_option(dev); 614 option_flags = 0;
638 if (!option) {
639 kfree(dev);
640 return 1;
641 }
642 pnp_add_card_device(card, dev); 615 pnp_add_card_device(card, dev);
643 } else { 616 } else {
644 skip = 1; 617 skip = 1;
645 } 618 }
646 priority = 0;
647 compat = 0; 619 compat = 0;
648 break; 620 break;
649 case _STAG_COMPATDEVID: 621 case _STAG_COMPATDEVID:
@@ -660,44 +632,42 @@ static int __init isapnp_create_device(struct pnp_card *card,
660 case _STAG_IRQ: 632 case _STAG_IRQ:
661 if (size < 2 || size > 3) 633 if (size < 2 || size > 3)
662 goto __skip; 634 goto __skip;
663 isapnp_parse_irq_resource(dev, option, size); 635 isapnp_parse_irq_resource(dev, option_flags, size);
664 size = 0; 636 size = 0;
665 break; 637 break;
666 case _STAG_DMA: 638 case _STAG_DMA:
667 if (size != 2) 639 if (size != 2)
668 goto __skip; 640 goto __skip;
669 isapnp_parse_dma_resource(dev, option, size); 641 isapnp_parse_dma_resource(dev, option_flags, size);
670 size = 0; 642 size = 0;
671 break; 643 break;
672 case _STAG_STARTDEP: 644 case _STAG_STARTDEP:
673 if (size > 1) 645 if (size > 1)
674 goto __skip; 646 goto __skip;
675 priority = 0x100 | PNP_RES_PRIORITY_ACCEPTABLE; 647 priority = PNP_RES_PRIORITY_ACCEPTABLE;
676 if (size > 0) { 648 if (size > 0) {
677 isapnp_peek(tmp, size); 649 isapnp_peek(tmp, size);
678 priority = 0x100 | tmp[0]; 650 priority = tmp[0];
679 size = 0; 651 size = 0;
680 } 652 }
681 option = pnp_register_dependent_option(dev, priority); 653 option_flags = pnp_new_dependent_set(dev, priority);
682 if (!option)
683 return 1;
684 break; 654 break;
685 case _STAG_ENDDEP: 655 case _STAG_ENDDEP:
686 if (size != 0) 656 if (size != 0)
687 goto __skip; 657 goto __skip;
688 priority = 0; 658 option_flags = 0;
689 dev_dbg(&dev->dev, "end dependent options\n");
690 break; 659 break;
691 case _STAG_IOPORT: 660 case _STAG_IOPORT:
692 if (size != 7) 661 if (size != 7)
693 goto __skip; 662 goto __skip;
694 isapnp_parse_port_resource(dev, option, size); 663 isapnp_parse_port_resource(dev, option_flags, size);
695 size = 0; 664 size = 0;
696 break; 665 break;
697 case _STAG_FIXEDIO: 666 case _STAG_FIXEDIO:
698 if (size != 3) 667 if (size != 3)
699 goto __skip; 668 goto __skip;
700 isapnp_parse_fixed_port_resource(dev, option, size); 669 isapnp_parse_fixed_port_resource(dev, option_flags,
670 size);
701 size = 0; 671 size = 0;
702 break; 672 break;
703 case _STAG_VENDOR: 673 case _STAG_VENDOR:
@@ -705,7 +675,7 @@ static int __init isapnp_create_device(struct pnp_card *card,
705 case _LTAG_MEMRANGE: 675 case _LTAG_MEMRANGE:
706 if (size != 9) 676 if (size != 9)
707 goto __skip; 677 goto __skip;
708 isapnp_parse_mem_resource(dev, option, size); 678 isapnp_parse_mem_resource(dev, option_flags, size);
709 size = 0; 679 size = 0;
710 break; 680 break;
711 case _LTAG_ANSISTR: 681 case _LTAG_ANSISTR:
@@ -720,13 +690,14 @@ static int __init isapnp_create_device(struct pnp_card *card,
720 case _LTAG_MEM32RANGE: 690 case _LTAG_MEM32RANGE:
721 if (size != 17) 691 if (size != 17)
722 goto __skip; 692 goto __skip;
723 isapnp_parse_mem32_resource(dev, option, size); 693 isapnp_parse_mem32_resource(dev, option_flags, size);
724 size = 0; 694 size = 0;
725 break; 695 break;
726 case _LTAG_FIXEDMEM32RANGE: 696 case _LTAG_FIXEDMEM32RANGE:
727 if (size != 9) 697 if (size != 9)
728 goto __skip; 698 goto __skip;
729 isapnp_parse_fixed_mem32_resource(dev, option, size); 699 isapnp_parse_fixed_mem32_resource(dev, option_flags,
700 size);
730 size = 0; 701 size = 0;
731 break; 702 break;
732 case _STAG_END: 703 case _STAG_END:
@@ -928,7 +899,6 @@ EXPORT_SYMBOL(isapnp_write_byte);
928 899
929static int isapnp_get_resources(struct pnp_dev *dev) 900static int isapnp_get_resources(struct pnp_dev *dev)
930{ 901{
931 struct pnp_resource *pnp_res;
932 int i, ret; 902 int i, ret;
933 903
934 dev_dbg(&dev->dev, "get resources\n"); 904 dev_dbg(&dev->dev, "get resources\n");
@@ -940,35 +910,23 @@ static int isapnp_get_resources(struct pnp_dev *dev)
940 910
941 for (i = 0; i < ISAPNP_MAX_PORT; i++) { 911 for (i = 0; i < ISAPNP_MAX_PORT; i++) {
942 ret = isapnp_read_word(ISAPNP_CFG_PORT + (i << 1)); 912 ret = isapnp_read_word(ISAPNP_CFG_PORT + (i << 1));
943 if (ret) { 913 pnp_add_io_resource(dev, ret, ret,
944 pnp_res = pnp_add_io_resource(dev, ret, ret, 0); 914 ret == 0 ? IORESOURCE_DISABLED : 0);
945 if (pnp_res)
946 pnp_res->index = i;
947 }
948 } 915 }
949 for (i = 0; i < ISAPNP_MAX_MEM; i++) { 916 for (i = 0; i < ISAPNP_MAX_MEM; i++) {
950 ret = isapnp_read_word(ISAPNP_CFG_MEM + (i << 3)) << 8; 917 ret = isapnp_read_word(ISAPNP_CFG_MEM + (i << 3)) << 8;
951 if (ret) { 918 pnp_add_mem_resource(dev, ret, ret,
952 pnp_res = pnp_add_mem_resource(dev, ret, ret, 0); 919 ret == 0 ? IORESOURCE_DISABLED : 0);
953 if (pnp_res)
954 pnp_res->index = i;
955 }
956 } 920 }
957 for (i = 0; i < ISAPNP_MAX_IRQ; i++) { 921 for (i = 0; i < ISAPNP_MAX_IRQ; i++) {
958 ret = isapnp_read_word(ISAPNP_CFG_IRQ + (i << 1)) >> 8; 922 ret = isapnp_read_word(ISAPNP_CFG_IRQ + (i << 1)) >> 8;
959 if (ret) { 923 pnp_add_irq_resource(dev, ret,
960 pnp_res = pnp_add_irq_resource(dev, ret, 0); 924 ret == 0 ? IORESOURCE_DISABLED : 0);
961 if (pnp_res)
962 pnp_res->index = i;
963 }
964 } 925 }
965 for (i = 0; i < ISAPNP_MAX_DMA; i++) { 926 for (i = 0; i < ISAPNP_MAX_DMA; i++) {
966 ret = isapnp_read_byte(ISAPNP_CFG_DMA + i); 927 ret = isapnp_read_byte(ISAPNP_CFG_DMA + i);
967 if (ret != 4) { 928 pnp_add_dma_resource(dev, ret,
968 pnp_res = pnp_add_dma_resource(dev, ret, 0); 929 ret == 4 ? IORESOURCE_DISABLED : 0);
969 if (pnp_res)
970 pnp_res->index = i;
971 }
972 } 930 }
973 931
974__end: 932__end:
@@ -978,62 +936,45 @@ __end:
978 936
979static int isapnp_set_resources(struct pnp_dev *dev) 937static int isapnp_set_resources(struct pnp_dev *dev)
980{ 938{
981 struct pnp_resource *pnp_res;
982 struct resource *res; 939 struct resource *res;
983 int tmp, index; 940 int tmp;
984 941
985 dev_dbg(&dev->dev, "set resources\n"); 942 dev_dbg(&dev->dev, "set resources\n");
986 isapnp_cfg_begin(dev->card->number, dev->number); 943 isapnp_cfg_begin(dev->card->number, dev->number);
987 dev->active = 1; 944 dev->active = 1;
988 for (tmp = 0; tmp < ISAPNP_MAX_PORT; tmp++) { 945 for (tmp = 0; tmp < ISAPNP_MAX_PORT; tmp++) {
989 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, tmp); 946 res = pnp_get_resource(dev, IORESOURCE_IO, tmp);
990 if (!pnp_res) 947 if (pnp_resource_enabled(res)) {
991 continue;
992 res = &pnp_res->res;
993 if (pnp_resource_valid(res)) {
994 index = pnp_res->index;
995 dev_dbg(&dev->dev, " set io %d to %#llx\n", 948 dev_dbg(&dev->dev, " set io %d to %#llx\n",
996 index, (unsigned long long) res->start); 949 tmp, (unsigned long long) res->start);
997 isapnp_write_word(ISAPNP_CFG_PORT + (index << 1), 950 isapnp_write_word(ISAPNP_CFG_PORT + (tmp << 1),
998 res->start); 951 res->start);
999 } 952 }
1000 } 953 }
1001 for (tmp = 0; tmp < ISAPNP_MAX_IRQ; tmp++) { 954 for (tmp = 0; tmp < ISAPNP_MAX_IRQ; tmp++) {
1002 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, tmp); 955 res = pnp_get_resource(dev, IORESOURCE_IRQ, tmp);
1003 if (!pnp_res) 956 if (pnp_resource_enabled(res)) {
1004 continue;
1005 res = &pnp_res->res;
1006 if (pnp_resource_valid(res)) {
1007 int irq = res->start; 957 int irq = res->start;
1008 if (irq == 2) 958 if (irq == 2)
1009 irq = 9; 959 irq = 9;
1010 index = pnp_res->index; 960 dev_dbg(&dev->dev, " set irq %d to %d\n", tmp, irq);
1011 dev_dbg(&dev->dev, " set irq %d to %d\n", index, irq); 961 isapnp_write_byte(ISAPNP_CFG_IRQ + (tmp << 1), irq);
1012 isapnp_write_byte(ISAPNP_CFG_IRQ + (index << 1), irq);
1013 } 962 }
1014 } 963 }
1015 for (tmp = 0; tmp < ISAPNP_MAX_DMA; tmp++) { 964 for (tmp = 0; tmp < ISAPNP_MAX_DMA; tmp++) {
1016 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, tmp); 965 res = pnp_get_resource(dev, IORESOURCE_DMA, tmp);
1017 if (!pnp_res) 966 if (pnp_resource_enabled(res)) {
1018 continue;
1019 res = &pnp_res->res;
1020 if (pnp_resource_valid(res)) {
1021 index = pnp_res->index;
1022 dev_dbg(&dev->dev, " set dma %d to %lld\n", 967 dev_dbg(&dev->dev, " set dma %d to %lld\n",
1023 index, (unsigned long long) res->start); 968 tmp, (unsigned long long) res->start);
1024 isapnp_write_byte(ISAPNP_CFG_DMA + index, res->start); 969 isapnp_write_byte(ISAPNP_CFG_DMA + tmp, res->start);
1025 } 970 }
1026 } 971 }
1027 for (tmp = 0; tmp < ISAPNP_MAX_MEM; tmp++) { 972 for (tmp = 0; tmp < ISAPNP_MAX_MEM; tmp++) {
1028 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, tmp); 973 res = pnp_get_resource(dev, IORESOURCE_MEM, tmp);
1029 if (!pnp_res) 974 if (pnp_resource_enabled(res)) {
1030 continue;
1031 res = &pnp_res->res;
1032 if (pnp_resource_valid(res)) {
1033 index = pnp_res->index;
1034 dev_dbg(&dev->dev, " set mem %d to %#llx\n", 975 dev_dbg(&dev->dev, " set mem %d to %#llx\n",
1035 index, (unsigned long long) res->start); 976 tmp, (unsigned long long) res->start);
1036 isapnp_write_word(ISAPNP_CFG_MEM + (index << 3), 977 isapnp_write_word(ISAPNP_CFG_MEM + (tmp << 3),
1037 (res->start >> 8) & 0xffff); 978 (res->start >> 8) & 0xffff);
1038 } 979 }
1039 } 980 }
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index bea0914ff947..b526eaad3f6c 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -3,6 +3,8 @@
3 * 3 *
4 * based on isapnp.c resource management (c) Jaroslav Kysela <perex@perex.cz> 4 * based on isapnp.c resource management (c) Jaroslav Kysela <perex@perex.cz>
5 * Copyright 2003 Adam Belay <ambx1@neo.rr.com> 5 * Copyright 2003 Adam Belay <ambx1@neo.rr.com>
6 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
7 * Bjorn Helgaas <bjorn.helgaas@hp.com>
6 */ 8 */
7 9
8#include <linux/errno.h> 10#include <linux/errno.h>
@@ -19,82 +21,64 @@ DEFINE_MUTEX(pnp_res_mutex);
19 21
20static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx) 22static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
21{ 23{
22 struct pnp_resource *pnp_res; 24 struct resource *res, local_res;
23 struct resource *res;
24
25 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, idx);
26 if (!pnp_res) {
27 dev_err(&dev->dev, "too many I/O port resources\n");
28 /* pretend we were successful so at least the manager won't try again */
29 return 1;
30 }
31
32 res = &pnp_res->res;
33 25
34 /* check if this resource has been manually set, if so skip */ 26 res = pnp_get_resource(dev, IORESOURCE_IO, idx);
35 if (!(res->flags & IORESOURCE_AUTO)) { 27 if (res) {
36 dev_dbg(&dev->dev, " io %d already set to %#llx-%#llx " 28 dev_dbg(&dev->dev, " io %d already set to %#llx-%#llx "
37 "flags %#lx\n", idx, (unsigned long long) res->start, 29 "flags %#lx\n", idx, (unsigned long long) res->start,
38 (unsigned long long) res->end, res->flags); 30 (unsigned long long) res->end, res->flags);
39 return 1; 31 return 0;
40 } 32 }
41 33
42 /* set the initial values */ 34 res = &local_res;
43 pnp_res->index = idx; 35 res->flags = rule->flags | IORESOURCE_AUTO;
44 res->flags |= rule->flags | IORESOURCE_IO; 36 res->start = 0;
45 res->flags &= ~IORESOURCE_UNSET; 37 res->end = 0;
46 38
47 if (!rule->size) { 39 if (!rule->size) {
48 res->flags |= IORESOURCE_DISABLED; 40 res->flags |= IORESOURCE_DISABLED;
49 dev_dbg(&dev->dev, " io %d disabled\n", idx); 41 dev_dbg(&dev->dev, " io %d disabled\n", idx);
50 return 1; /* skip disabled resource requests */ 42 goto __add;
51 } 43 }
52 44
53 res->start = rule->min; 45 res->start = rule->min;
54 res->end = res->start + rule->size - 1; 46 res->end = res->start + rule->size - 1;
55 47
56 /* run through until pnp_check_port is happy */
57 while (!pnp_check_port(dev, res)) { 48 while (!pnp_check_port(dev, res)) {
58 res->start += rule->align; 49 res->start += rule->align;
59 res->end = res->start + rule->size - 1; 50 res->end = res->start + rule->size - 1;
60 if (res->start > rule->max || !rule->align) { 51 if (res->start > rule->max || !rule->align) {
61 dev_dbg(&dev->dev, " couldn't assign io %d\n", idx); 52 dev_dbg(&dev->dev, " couldn't assign io %d "
62 return 0; 53 "(min %#llx max %#llx)\n", idx,
54 (unsigned long long) rule->min,
55 (unsigned long long) rule->max);
56 return -EBUSY;
63 } 57 }
64 } 58 }
65 dev_dbg(&dev->dev, " assign io %d %#llx-%#llx\n", idx, 59
66 (unsigned long long) res->start, (unsigned long long) res->end); 60__add:
67 return 1; 61 pnp_add_io_resource(dev, res->start, res->end, res->flags);
62 return 0;
68} 63}
69 64
70static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx) 65static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
71{ 66{
72 struct pnp_resource *pnp_res; 67 struct resource *res, local_res;
73 struct resource *res;
74
75 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, idx);
76 if (!pnp_res) {
77 dev_err(&dev->dev, "too many memory resources\n");
78 /* pretend we were successful so at least the manager won't try again */
79 return 1;
80 }
81 68
82 res = &pnp_res->res; 69 res = pnp_get_resource(dev, IORESOURCE_MEM, idx);
83 70 if (res) {
84 /* check if this resource has been manually set, if so skip */
85 if (!(res->flags & IORESOURCE_AUTO)) {
86 dev_dbg(&dev->dev, " mem %d already set to %#llx-%#llx " 71 dev_dbg(&dev->dev, " mem %d already set to %#llx-%#llx "
87 "flags %#lx\n", idx, (unsigned long long) res->start, 72 "flags %#lx\n", idx, (unsigned long long) res->start,
88 (unsigned long long) res->end, res->flags); 73 (unsigned long long) res->end, res->flags);
89 return 1; 74 return 0;
90 } 75 }
91 76
92 /* set the initial values */ 77 res = &local_res;
93 pnp_res->index = idx; 78 res->flags = rule->flags | IORESOURCE_AUTO;
94 res->flags |= rule->flags | IORESOURCE_MEM; 79 res->start = 0;
95 res->flags &= ~IORESOURCE_UNSET; 80 res->end = 0;
96 81
97 /* convert pnp flags to standard Linux flags */
98 if (!(rule->flags & IORESOURCE_MEM_WRITEABLE)) 82 if (!(rule->flags & IORESOURCE_MEM_WRITEABLE))
99 res->flags |= IORESOURCE_READONLY; 83 res->flags |= IORESOURCE_READONLY;
100 if (rule->flags & IORESOURCE_MEM_CACHEABLE) 84 if (rule->flags & IORESOURCE_MEM_CACHEABLE)
@@ -107,30 +91,32 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
107 if (!rule->size) { 91 if (!rule->size) {
108 res->flags |= IORESOURCE_DISABLED; 92 res->flags |= IORESOURCE_DISABLED;
109 dev_dbg(&dev->dev, " mem %d disabled\n", idx); 93 dev_dbg(&dev->dev, " mem %d disabled\n", idx);
110 return 1; /* skip disabled resource requests */ 94 goto __add;
111 } 95 }
112 96
113 res->start = rule->min; 97 res->start = rule->min;
114 res->end = res->start + rule->size - 1; 98 res->end = res->start + rule->size - 1;
115 99
116 /* run through until pnp_check_mem is happy */
117 while (!pnp_check_mem(dev, res)) { 100 while (!pnp_check_mem(dev, res)) {
118 res->start += rule->align; 101 res->start += rule->align;
119 res->end = res->start + rule->size - 1; 102 res->end = res->start + rule->size - 1;
120 if (res->start > rule->max || !rule->align) { 103 if (res->start > rule->max || !rule->align) {
121 dev_dbg(&dev->dev, " couldn't assign mem %d\n", idx); 104 dev_dbg(&dev->dev, " couldn't assign mem %d "
122 return 0; 105 "(min %#llx max %#llx)\n", idx,
106 (unsigned long long) rule->min,
107 (unsigned long long) rule->max);
108 return -EBUSY;
123 } 109 }
124 } 110 }
125 dev_dbg(&dev->dev, " assign mem %d %#llx-%#llx\n", idx, 111
126 (unsigned long long) res->start, (unsigned long long) res->end); 112__add:
127 return 1; 113 pnp_add_mem_resource(dev, res->start, res->end, res->flags);
114 return 0;
128} 115}
129 116
130static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx) 117static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
131{ 118{
132 struct pnp_resource *pnp_res; 119 struct resource *res, local_res;
133 struct resource *res;
134 int i; 120 int i;
135 121
136 /* IRQ priority: this table is good for i386 */ 122 /* IRQ priority: this table is good for i386 */
@@ -138,59 +124,57 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
138 5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2 124 5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2
139 }; 125 };
140 126
141 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, idx); 127 res = pnp_get_resource(dev, IORESOURCE_IRQ, idx);
142 if (!pnp_res) { 128 if (res) {
143 dev_err(&dev->dev, "too many IRQ resources\n");
144 /* pretend we were successful so at least the manager won't try again */
145 return 1;
146 }
147
148 res = &pnp_res->res;
149
150 /* check if this resource has been manually set, if so skip */
151 if (!(res->flags & IORESOURCE_AUTO)) {
152 dev_dbg(&dev->dev, " irq %d already set to %d flags %#lx\n", 129 dev_dbg(&dev->dev, " irq %d already set to %d flags %#lx\n",
153 idx, (int) res->start, res->flags); 130 idx, (int) res->start, res->flags);
154 return 1; 131 return 0;
155 } 132 }
156 133
157 /* set the initial values */ 134 res = &local_res;
158 pnp_res->index = idx; 135 res->flags = rule->flags | IORESOURCE_AUTO;
159 res->flags |= rule->flags | IORESOURCE_IRQ; 136 res->start = -1;
160 res->flags &= ~IORESOURCE_UNSET; 137 res->end = -1;
161 138
162 if (bitmap_empty(rule->map, PNP_IRQ_NR)) { 139 if (bitmap_empty(rule->map.bits, PNP_IRQ_NR)) {
163 res->flags |= IORESOURCE_DISABLED; 140 res->flags |= IORESOURCE_DISABLED;
164 dev_dbg(&dev->dev, " irq %d disabled\n", idx); 141 dev_dbg(&dev->dev, " irq %d disabled\n", idx);
165 return 1; /* skip disabled resource requests */ 142 goto __add;
166 } 143 }
167 144
168 /* TBD: need check for >16 IRQ */ 145 /* TBD: need check for >16 IRQ */
169 res->start = find_next_bit(rule->map, PNP_IRQ_NR, 16); 146 res->start = find_next_bit(rule->map.bits, PNP_IRQ_NR, 16);
170 if (res->start < PNP_IRQ_NR) { 147 if (res->start < PNP_IRQ_NR) {
171 res->end = res->start; 148 res->end = res->start;
172 dev_dbg(&dev->dev, " assign irq %d %d\n", idx, 149 goto __add;
173 (int) res->start);
174 return 1;
175 } 150 }
176 for (i = 0; i < 16; i++) { 151 for (i = 0; i < 16; i++) {
177 if (test_bit(xtab[i], rule->map)) { 152 if (test_bit(xtab[i], rule->map.bits)) {
178 res->start = res->end = xtab[i]; 153 res->start = res->end = xtab[i];
179 if (pnp_check_irq(dev, res)) { 154 if (pnp_check_irq(dev, res))
180 dev_dbg(&dev->dev, " assign irq %d %d\n", idx, 155 goto __add;
181 (int) res->start);
182 return 1;
183 }
184 } 156 }
185 } 157 }
158
159 if (rule->flags & IORESOURCE_IRQ_OPTIONAL) {
160 res->start = -1;
161 res->end = -1;
162 res->flags |= IORESOURCE_DISABLED;
163 dev_dbg(&dev->dev, " irq %d disabled (optional)\n", idx);
164 goto __add;
165 }
166
186 dev_dbg(&dev->dev, " couldn't assign irq %d\n", idx); 167 dev_dbg(&dev->dev, " couldn't assign irq %d\n", idx);
168 return -EBUSY;
169
170__add:
171 pnp_add_irq_resource(dev, res->start, res->flags);
187 return 0; 172 return 0;
188} 173}
189 174
190static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx) 175static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
191{ 176{
192 struct pnp_resource *pnp_res; 177 struct resource *res, local_res;
193 struct resource *res;
194 int i; 178 int i;
195 179
196 /* DMA priority: this table is good for i386 */ 180 /* DMA priority: this table is good for i386 */
@@ -198,231 +182,99 @@ static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
198 1, 3, 5, 6, 7, 0, 2, 4 182 1, 3, 5, 6, 7, 0, 2, 4
199 }; 183 };
200 184
201 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, idx); 185 res = pnp_get_resource(dev, IORESOURCE_DMA, idx);
202 if (!pnp_res) { 186 if (res) {
203 dev_err(&dev->dev, "too many DMA resources\n");
204 return;
205 }
206
207 res = &pnp_res->res;
208
209 /* check if this resource has been manually set, if so skip */
210 if (!(res->flags & IORESOURCE_AUTO)) {
211 dev_dbg(&dev->dev, " dma %d already set to %d flags %#lx\n", 187 dev_dbg(&dev->dev, " dma %d already set to %d flags %#lx\n",
212 idx, (int) res->start, res->flags); 188 idx, (int) res->start, res->flags);
213 return; 189 return 0;
214 } 190 }
215 191
216 /* set the initial values */ 192 res = &local_res;
217 pnp_res->index = idx; 193 res->flags = rule->flags | IORESOURCE_AUTO;
218 res->flags |= rule->flags | IORESOURCE_DMA; 194 res->start = -1;
219 res->flags &= ~IORESOURCE_UNSET; 195 res->end = -1;
220 196
221 for (i = 0; i < 8; i++) { 197 for (i = 0; i < 8; i++) {
222 if (rule->map & (1 << xtab[i])) { 198 if (rule->map & (1 << xtab[i])) {
223 res->start = res->end = xtab[i]; 199 res->start = res->end = xtab[i];
224 if (pnp_check_dma(dev, res)) { 200 if (pnp_check_dma(dev, res))
225 dev_dbg(&dev->dev, " assign dma %d %d\n", idx, 201 goto __add;
226 (int) res->start);
227 return;
228 }
229 } 202 }
230 } 203 }
231#ifdef MAX_DMA_CHANNELS 204#ifdef MAX_DMA_CHANNELS
232 res->start = res->end = MAX_DMA_CHANNELS; 205 res->start = res->end = MAX_DMA_CHANNELS;
233#endif 206#endif
234 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED; 207 res->flags |= IORESOURCE_DISABLED;
235 dev_dbg(&dev->dev, " disable dma %d\n", idx); 208 dev_dbg(&dev->dev, " disable dma %d\n", idx);
236}
237
238void pnp_init_resource(struct resource *res)
239{
240 unsigned long type;
241
242 type = res->flags & (IORESOURCE_IO | IORESOURCE_MEM |
243 IORESOURCE_IRQ | IORESOURCE_DMA);
244 209
245 res->name = NULL; 210__add:
246 res->flags = type | IORESOURCE_AUTO | IORESOURCE_UNSET; 211 pnp_add_dma_resource(dev, res->start, res->flags);
247 if (type == IORESOURCE_IRQ || type == IORESOURCE_DMA) { 212 return 0;
248 res->start = -1;
249 res->end = -1;
250 } else {
251 res->start = 0;
252 res->end = 0;
253 }
254} 213}
255 214
256/**
257 * pnp_init_resources - Resets a resource table to default values.
258 * @table: pointer to the desired resource table
259 */
260void pnp_init_resources(struct pnp_dev *dev) 215void pnp_init_resources(struct pnp_dev *dev)
261{ 216{
262 struct resource *res; 217 pnp_free_resources(dev);
263 int idx;
264
265 for (idx = 0; idx < PNP_MAX_IRQ; idx++) {
266 res = &dev->res->irq[idx].res;
267 res->flags = IORESOURCE_IRQ;
268 pnp_init_resource(res);
269 }
270 for (idx = 0; idx < PNP_MAX_DMA; idx++) {
271 res = &dev->res->dma[idx].res;
272 res->flags = IORESOURCE_DMA;
273 pnp_init_resource(res);
274 }
275 for (idx = 0; idx < PNP_MAX_PORT; idx++) {
276 res = &dev->res->port[idx].res;
277 res->flags = IORESOURCE_IO;
278 pnp_init_resource(res);
279 }
280 for (idx = 0; idx < PNP_MAX_MEM; idx++) {
281 res = &dev->res->mem[idx].res;
282 res->flags = IORESOURCE_MEM;
283 pnp_init_resource(res);
284 }
285} 218}
286 219
287/**
288 * pnp_clean_resources - clears resources that were not manually set
289 * @res: the resources to clean
290 */
291static void pnp_clean_resource_table(struct pnp_dev *dev) 220static void pnp_clean_resource_table(struct pnp_dev *dev)
292{ 221{
293 struct resource *res; 222 struct pnp_resource *pnp_res, *tmp;
294 int idx; 223
295 224 list_for_each_entry_safe(pnp_res, tmp, &dev->resources, list) {
296 for (idx = 0; idx < PNP_MAX_IRQ; idx++) { 225 if (pnp_res->res.flags & IORESOURCE_AUTO)
297 res = &dev->res->irq[idx].res; 226 pnp_free_resource(pnp_res);
298 if (res->flags & IORESOURCE_AUTO) {
299 res->flags = IORESOURCE_IRQ;
300 pnp_init_resource(res);
301 }
302 }
303 for (idx = 0; idx < PNP_MAX_DMA; idx++) {
304 res = &dev->res->dma[idx].res;
305 if (res->flags & IORESOURCE_AUTO) {
306 res->flags = IORESOURCE_DMA;
307 pnp_init_resource(res);
308 }
309 }
310 for (idx = 0; idx < PNP_MAX_PORT; idx++) {
311 res = &dev->res->port[idx].res;
312 if (res->flags & IORESOURCE_AUTO) {
313 res->flags = IORESOURCE_IO;
314 pnp_init_resource(res);
315 }
316 }
317 for (idx = 0; idx < PNP_MAX_MEM; idx++) {
318 res = &dev->res->mem[idx].res;
319 if (res->flags & IORESOURCE_AUTO) {
320 res->flags = IORESOURCE_MEM;
321 pnp_init_resource(res);
322 }
323 } 227 }
324} 228}
325 229
326/** 230/**
327 * pnp_assign_resources - assigns resources to the device based on the specified dependent number 231 * pnp_assign_resources - assigns resources to the device based on the specified dependent number
328 * @dev: pointer to the desired device 232 * @dev: pointer to the desired device
329 * @depnum: the dependent function number 233 * @set: the dependent function number
330 *
331 * Only set depnum to 0 if the device does not have dependent options.
332 */ 234 */
333static int pnp_assign_resources(struct pnp_dev *dev, int depnum) 235static int pnp_assign_resources(struct pnp_dev *dev, int set)
334{ 236{
335 struct pnp_port *port; 237 struct pnp_option *option;
336 struct pnp_mem *mem;
337 struct pnp_irq *irq;
338 struct pnp_dma *dma;
339 int nport = 0, nmem = 0, nirq = 0, ndma = 0; 238 int nport = 0, nmem = 0, nirq = 0, ndma = 0;
239 int ret = 0;
340 240
341 if (!pnp_can_configure(dev)) 241 dev_dbg(&dev->dev, "pnp_assign_resources, try dependent set %d\n", set);
342 return -ENODEV;
343
344 dbg_pnp_show_resources(dev, "before pnp_assign_resources");
345 mutex_lock(&pnp_res_mutex); 242 mutex_lock(&pnp_res_mutex);
346 pnp_clean_resource_table(dev); 243 pnp_clean_resource_table(dev);
347 if (dev->independent) {
348 dev_dbg(&dev->dev, "assigning independent options\n");
349 port = dev->independent->port;
350 mem = dev->independent->mem;
351 irq = dev->independent->irq;
352 dma = dev->independent->dma;
353 while (port) {
354 if (!pnp_assign_port(dev, port, nport))
355 goto fail;
356 nport++;
357 port = port->next;
358 }
359 while (mem) {
360 if (!pnp_assign_mem(dev, mem, nmem))
361 goto fail;
362 nmem++;
363 mem = mem->next;
364 }
365 while (irq) {
366 if (!pnp_assign_irq(dev, irq, nirq))
367 goto fail;
368 nirq++;
369 irq = irq->next;
370 }
371 while (dma) {
372 pnp_assign_dma(dev, dma, ndma);
373 ndma++;
374 dma = dma->next;
375 }
376 }
377 244
378 if (depnum) { 245 list_for_each_entry(option, &dev->options, list) {
379 struct pnp_option *dep; 246 if (pnp_option_is_dependent(option) &&
380 int i; 247 pnp_option_set(option) != set)
381 248 continue;
382 dev_dbg(&dev->dev, "assigning dependent option %d\n", depnum); 249
383 for (i = 1, dep = dev->dependent; i < depnum; 250 switch (option->type) {
384 i++, dep = dep->next) 251 case IORESOURCE_IO:
385 if (!dep) 252 ret = pnp_assign_port(dev, &option->u.port, nport++);
386 goto fail; 253 break;
387 port = dep->port; 254 case IORESOURCE_MEM:
388 mem = dep->mem; 255 ret = pnp_assign_mem(dev, &option->u.mem, nmem++);
389 irq = dep->irq; 256 break;
390 dma = dep->dma; 257 case IORESOURCE_IRQ:
391 while (port) { 258 ret = pnp_assign_irq(dev, &option->u.irq, nirq++);
392 if (!pnp_assign_port(dev, port, nport)) 259 break;
393 goto fail; 260 case IORESOURCE_DMA:
394 nport++; 261 ret = pnp_assign_dma(dev, &option->u.dma, ndma++);
395 port = port->next; 262 break;
396 } 263 default:
397 while (mem) { 264 ret = -EINVAL;
398 if (!pnp_assign_mem(dev, mem, nmem)) 265 break;
399 goto fail;
400 nmem++;
401 mem = mem->next;
402 }
403 while (irq) {
404 if (!pnp_assign_irq(dev, irq, nirq))
405 goto fail;
406 nirq++;
407 irq = irq->next;
408 } 266 }
409 while (dma) { 267 if (ret < 0)
410 pnp_assign_dma(dev, dma, ndma); 268 break;
411 ndma++; 269 }
412 dma = dma->next;
413 }
414 } else if (dev->dependent)
415 goto fail;
416
417 mutex_unlock(&pnp_res_mutex);
418 dbg_pnp_show_resources(dev, "after pnp_assign_resources");
419 return 1;
420 270
421fail:
422 pnp_clean_resource_table(dev);
423 mutex_unlock(&pnp_res_mutex); 271 mutex_unlock(&pnp_res_mutex);
424 dbg_pnp_show_resources(dev, "after pnp_assign_resources (failed)"); 272 if (ret < 0) {
425 return 0; 273 dev_dbg(&dev->dev, "pnp_assign_resources failed (%d)\n", ret);
274 pnp_clean_resource_table(dev);
275 } else
276 dbg_pnp_show_resources(dev, "pnp_assign_resources succeeded");
277 return ret;
426} 278}
427 279
428/** 280/**
@@ -431,29 +283,25 @@ fail:
431 */ 283 */
432int pnp_auto_config_dev(struct pnp_dev *dev) 284int pnp_auto_config_dev(struct pnp_dev *dev)
433{ 285{
434 struct pnp_option *dep; 286 int i, ret;
435 int i = 1;
436 287
437 if (!pnp_can_configure(dev)) { 288 if (!pnp_can_configure(dev)) {
438 dev_dbg(&dev->dev, "configuration not supported\n"); 289 dev_dbg(&dev->dev, "configuration not supported\n");
439 return -ENODEV; 290 return -ENODEV;
440 } 291 }
441 292
442 if (!dev->dependent) { 293 ret = pnp_assign_resources(dev, 0);
443 if (pnp_assign_resources(dev, 0)) 294 if (ret == 0)
295 return 0;
296
297 for (i = 1; i < dev->num_dependent_sets; i++) {
298 ret = pnp_assign_resources(dev, i);
299 if (ret == 0)
444 return 0; 300 return 0;
445 } else {
446 dep = dev->dependent;
447 do {
448 if (pnp_assign_resources(dev, i))
449 return 0;
450 dep = dep->next;
451 i++;
452 } while (dep);
453 } 301 }
454 302
455 dev_err(&dev->dev, "unable to assign resources\n"); 303 dev_err(&dev->dev, "unable to assign resources\n");
456 return -EBUSY; 304 return ret;
457} 305}
458 306
459/** 307/**
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 50902773beaf..c1b9ea34977b 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -117,9 +117,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
117{ 117{
118 int power_state; 118 int power_state;
119 119
120 power_state = acpi_pm_device_sleep_state(&dev->dev, 120 power_state = acpi_pm_device_sleep_state(&dev->dev, NULL);
121 device_may_wakeup(&dev->dev),
122 NULL);
123 if (power_state < 0) 121 if (power_state < 0)
124 power_state = (state.event == PM_EVENT_ON) ? 122 power_state = (state.event == PM_EVENT_ON) ?
125 ACPI_STATE_D0 : ACPI_STATE_D3; 123 ACPI_STATE_D0 : ACPI_STATE_D3;
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 46c791adb894..d7e9f2152df0 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -3,6 +3,8 @@
3 * 3 *
4 * Copyright (c) 2004 Matthieu Castet <castet.matthieu@free.fr> 4 * Copyright (c) 2004 Matthieu Castet <castet.matthieu@free.fr>
5 * Copyright (c) 2004 Li Shaohua <shaohua.li@intel.com> 5 * Copyright (c) 2004 Li Shaohua <shaohua.li@intel.com>
6 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
7 * Bjorn Helgaas <bjorn.helgaas@hp.com>
6 * 8 *
7 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 10 * under the terms of the GNU General Public License as published by the
@@ -98,8 +100,10 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev,
98 int irq, flags; 100 int irq, flags;
99 int p, t; 101 int p, t;
100 102
101 if (!valid_IRQ(gsi)) 103 if (!valid_IRQ(gsi)) {
104 pnp_add_irq_resource(dev, gsi, IORESOURCE_DISABLED);
102 return; 105 return;
106 }
103 107
104 /* 108 /*
105 * in IO-APIC mode, use overrided attribute. Two reasons: 109 * in IO-APIC mode, use overrided attribute. Two reasons:
@@ -178,13 +182,68 @@ static void pnpacpi_parse_allocated_ioresource(struct pnp_dev *dev, u64 start,
178 u64 end = start + len - 1; 182 u64 end = start + len - 1;
179 183
180 if (io_decode == ACPI_DECODE_16) 184 if (io_decode == ACPI_DECODE_16)
181 flags |= PNP_PORT_FLAG_16BITADDR; 185 flags |= IORESOURCE_IO_16BIT_ADDR;
182 if (len == 0 || end >= 0x10003) 186 if (len == 0 || end >= 0x10003)
183 flags |= IORESOURCE_DISABLED; 187 flags |= IORESOURCE_DISABLED;
184 188
185 pnp_add_io_resource(dev, start, end, flags); 189 pnp_add_io_resource(dev, start, end, flags);
186} 190}
187 191
192/*
193 * Device CSRs that do not appear in PCI config space should be described
194 * via ACPI. This would normally be done with Address Space Descriptors
195 * marked as "consumer-only," but old versions of Windows and Linux ignore
196 * the producer/consumer flag, so HP invented a vendor-defined resource to
197 * describe the location and size of CSR space.
198 */
199static struct acpi_vendor_uuid hp_ccsr_uuid = {
200 .subtype = 2,
201 .data = { 0xf9, 0xad, 0xe9, 0x69, 0x4f, 0x92, 0x5f, 0xab, 0xf6, 0x4a,
202 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad },
203};
204
205static int vendor_resource_matches(struct pnp_dev *dev,
206 struct acpi_resource_vendor_typed *vendor,
207 struct acpi_vendor_uuid *match,
208 int expected_len)
209{
210 int uuid_len = sizeof(vendor->uuid);
211 u8 uuid_subtype = vendor->uuid_subtype;
212 u8 *uuid = vendor->uuid;
213 int actual_len;
214
215 /* byte_length includes uuid_subtype and uuid */
216 actual_len = vendor->byte_length - uuid_len - 1;
217
218 if (uuid_subtype == match->subtype &&
219 uuid_len == sizeof(match->data) &&
220 memcmp(uuid, match->data, uuid_len) == 0) {
221 if (expected_len && expected_len != actual_len) {
222 dev_err(&dev->dev, "wrong vendor descriptor size; "
223 "expected %d, found %d bytes\n",
224 expected_len, actual_len);
225 return 0;
226 }
227
228 return 1;
229 }
230
231 return 0;
232}
233
234static void pnpacpi_parse_allocated_vendor(struct pnp_dev *dev,
235 struct acpi_resource_vendor_typed *vendor)
236{
237 if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid, 16)) {
238 u64 start, length;
239
240 memcpy(&start, vendor->byte_data, sizeof(start));
241 memcpy(&length, vendor->byte_data + 8, sizeof(length));
242
243 pnp_add_mem_resource(dev, start, start + length - 1, 0);
244 }
245}
246
188static void pnpacpi_parse_allocated_memresource(struct pnp_dev *dev, 247static void pnpacpi_parse_allocated_memresource(struct pnp_dev *dev,
189 u64 start, u64 len, 248 u64 start, u64 len,
190 int write_protect) 249 int write_protect)
@@ -235,6 +294,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
235 struct acpi_resource_dma *dma; 294 struct acpi_resource_dma *dma;
236 struct acpi_resource_io *io; 295 struct acpi_resource_io *io;
237 struct acpi_resource_fixed_io *fixed_io; 296 struct acpi_resource_fixed_io *fixed_io;
297 struct acpi_resource_vendor_typed *vendor_typed;
238 struct acpi_resource_memory24 *memory24; 298 struct acpi_resource_memory24 *memory24;
239 struct acpi_resource_memory32 *memory32; 299 struct acpi_resource_memory32 *memory32;
240 struct acpi_resource_fixed_memory32 *fixed_memory32; 300 struct acpi_resource_fixed_memory32 *fixed_memory32;
@@ -248,24 +308,39 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
248 * _CRS, but some firmware violates this, so parse them all. 308 * _CRS, but some firmware violates this, so parse them all.
249 */ 309 */
250 irq = &res->data.irq; 310 irq = &res->data.irq;
251 for (i = 0; i < irq->interrupt_count; i++) { 311 if (irq->interrupt_count == 0)
252 pnpacpi_parse_allocated_irqresource(dev, 312 pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED);
253 irq->interrupts[i], 313 else {
254 irq->triggering, 314 for (i = 0; i < irq->interrupt_count; i++) {
255 irq->polarity, 315 pnpacpi_parse_allocated_irqresource(dev,
256 irq->sharable); 316 irq->interrupts[i],
317 irq->triggering,
318 irq->polarity,
319 irq->sharable);
320 }
321
322 /*
323 * The IRQ encoder puts a single interrupt in each
324 * descriptor, so if a _CRS descriptor has more than
325 * one interrupt, we won't be able to re-encode it.
326 */
327 if (pnp_can_write(dev) && irq->interrupt_count > 1) {
328 dev_warn(&dev->dev, "multiple interrupts in "
329 "_CRS descriptor; configuration can't "
330 "be changed\n");
331 dev->capabilities &= ~PNP_WRITE;
332 }
257 } 333 }
258 break; 334 break;
259 335
260 case ACPI_RESOURCE_TYPE_DMA: 336 case ACPI_RESOURCE_TYPE_DMA:
261 dma = &res->data.dma; 337 dma = &res->data.dma;
262 if (dma->channel_count > 0) { 338 if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
263 flags = dma_flags(dma->type, dma->bus_master, 339 flags = dma_flags(dma->type, dma->bus_master,
264 dma->transfer); 340 dma->transfer);
265 if (dma->channels[0] == (u8) -1) 341 else
266 flags |= IORESOURCE_DISABLED; 342 flags = IORESOURCE_DISABLED;
267 pnp_add_dma_resource(dev, dma->channels[0], flags); 343 pnp_add_dma_resource(dev, dma->channels[0], flags);
268 }
269 break; 344 break;
270 345
271 case ACPI_RESOURCE_TYPE_IO: 346 case ACPI_RESOURCE_TYPE_IO:
@@ -289,6 +364,8 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
289 break; 364 break;
290 365
291 case ACPI_RESOURCE_TYPE_VENDOR: 366 case ACPI_RESOURCE_TYPE_VENDOR:
367 vendor_typed = &res->data.vendor_typed;
368 pnpacpi_parse_allocated_vendor(dev, vendor_typed);
292 break; 369 break;
293 370
294 case ACPI_RESOURCE_TYPE_END_TAG: 371 case ACPI_RESOURCE_TYPE_END_TAG:
@@ -331,12 +408,29 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
331 if (extended_irq->producer_consumer == ACPI_PRODUCER) 408 if (extended_irq->producer_consumer == ACPI_PRODUCER)
332 return AE_OK; 409 return AE_OK;
333 410
334 for (i = 0; i < extended_irq->interrupt_count; i++) { 411 if (extended_irq->interrupt_count == 0)
335 pnpacpi_parse_allocated_irqresource(dev, 412 pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED);
336 extended_irq->interrupts[i], 413 else {
337 extended_irq->triggering, 414 for (i = 0; i < extended_irq->interrupt_count; i++) {
338 extended_irq->polarity, 415 pnpacpi_parse_allocated_irqresource(dev,
339 extended_irq->sharable); 416 extended_irq->interrupts[i],
417 extended_irq->triggering,
418 extended_irq->polarity,
419 extended_irq->sharable);
420 }
421
422 /*
423 * The IRQ encoder puts a single interrupt in each
424 * descriptor, so if a _CRS descriptor has more than
425 * one interrupt, we won't be able to re-encode it.
426 */
427 if (pnp_can_write(dev) &&
428 extended_irq->interrupt_count > 1) {
429 dev_warn(&dev->dev, "multiple interrupts in "
430 "_CRS descriptor; configuration can't "
431 "be changed\n");
432 dev->capabilities &= ~PNP_WRITE;
433 }
340 } 434 }
341 break; 435 break;
342 436
@@ -373,179 +467,147 @@ int pnpacpi_parse_allocated_resource(struct pnp_dev *dev)
373} 467}
374 468
375static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev, 469static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev,
376 struct pnp_option *option, 470 unsigned int option_flags,
377 struct acpi_resource_dma *p) 471 struct acpi_resource_dma *p)
378{ 472{
379 int i; 473 int i;
380 struct pnp_dma *dma; 474 unsigned char map = 0, flags;
381 475
382 if (p->channel_count == 0) 476 if (p->channel_count == 0)
383 return; 477 return;
384 dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL);
385 if (!dma)
386 return;
387 478
388 for (i = 0; i < p->channel_count; i++) 479 for (i = 0; i < p->channel_count; i++)
389 dma->map |= 1 << p->channels[i]; 480 map |= 1 << p->channels[i];
390
391 dma->flags = dma_flags(p->type, p->bus_master, p->transfer);
392 481
393 pnp_register_dma_resource(dev, option, dma); 482 flags = dma_flags(p->type, p->bus_master, p->transfer);
483 pnp_register_dma_resource(dev, option_flags, map, flags);
394} 484}
395 485
396static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev, 486static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev,
397 struct pnp_option *option, 487 unsigned int option_flags,
398 struct acpi_resource_irq *p) 488 struct acpi_resource_irq *p)
399{ 489{
400 int i; 490 int i;
401 struct pnp_irq *irq; 491 pnp_irq_mask_t map;
492 unsigned char flags;
402 493
403 if (p->interrupt_count == 0) 494 if (p->interrupt_count == 0)
404 return; 495 return;
405 irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL);
406 if (!irq)
407 return;
408 496
497 bitmap_zero(map.bits, PNP_IRQ_NR);
409 for (i = 0; i < p->interrupt_count; i++) 498 for (i = 0; i < p->interrupt_count; i++)
410 if (p->interrupts[i]) 499 if (p->interrupts[i])
411 __set_bit(p->interrupts[i], irq->map); 500 __set_bit(p->interrupts[i], map.bits);
412 irq->flags = irq_flags(p->triggering, p->polarity, p->sharable);
413 501
414 pnp_register_irq_resource(dev, option, irq); 502 flags = irq_flags(p->triggering, p->polarity, p->sharable);
503 pnp_register_irq_resource(dev, option_flags, &map, flags);
415} 504}
416 505
417static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev, 506static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
418 struct pnp_option *option, 507 unsigned int option_flags,
419 struct acpi_resource_extended_irq *p) 508 struct acpi_resource_extended_irq *p)
420{ 509{
421 int i; 510 int i;
422 struct pnp_irq *irq; 511 pnp_irq_mask_t map;
512 unsigned char flags;
423 513
424 if (p->interrupt_count == 0) 514 if (p->interrupt_count == 0)
425 return; 515 return;
426 irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL);
427 if (!irq)
428 return;
429 516
430 for (i = 0; i < p->interrupt_count; i++) 517 bitmap_zero(map.bits, PNP_IRQ_NR);
431 if (p->interrupts[i]) 518 for (i = 0; i < p->interrupt_count; i++) {
432 __set_bit(p->interrupts[i], irq->map); 519 if (p->interrupts[i]) {
433 irq->flags = irq_flags(p->triggering, p->polarity, p->sharable); 520 if (p->interrupts[i] < PNP_IRQ_NR)
521 __set_bit(p->interrupts[i], map.bits);
522 else
523 dev_err(&dev->dev, "ignoring IRQ %d option "
524 "(too large for %d entry bitmap)\n",
525 p->interrupts[i], PNP_IRQ_NR);
526 }
527 }
434 528
435 pnp_register_irq_resource(dev, option, irq); 529 flags = irq_flags(p->triggering, p->polarity, p->sharable);
530 pnp_register_irq_resource(dev, option_flags, &map, flags);
436} 531}
437 532
438static __init void pnpacpi_parse_port_option(struct pnp_dev *dev, 533static __init void pnpacpi_parse_port_option(struct pnp_dev *dev,
439 struct pnp_option *option, 534 unsigned int option_flags,
440 struct acpi_resource_io *io) 535 struct acpi_resource_io *io)
441{ 536{
442 struct pnp_port *port; 537 unsigned char flags = 0;
443 538
444 if (io->address_length == 0) 539 if (io->address_length == 0)
445 return; 540 return;
446 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); 541
447 if (!port) 542 if (io->io_decode == ACPI_DECODE_16)
448 return; 543 flags = IORESOURCE_IO_16BIT_ADDR;
449 port->min = io->minimum; 544 pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum,
450 port->max = io->maximum; 545 io->alignment, io->address_length, flags);
451 port->align = io->alignment;
452 port->size = io->address_length;
453 port->flags = ACPI_DECODE_16 == io->io_decode ?
454 PNP_PORT_FLAG_16BITADDR : 0;
455 pnp_register_port_resource(dev, option, port);
456} 546}
457 547
458static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev, 548static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev,
459 struct pnp_option *option, 549 unsigned int option_flags,
460 struct acpi_resource_fixed_io *io) 550 struct acpi_resource_fixed_io *io)
461{ 551{
462 struct pnp_port *port;
463
464 if (io->address_length == 0) 552 if (io->address_length == 0)
465 return; 553 return;
466 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); 554
467 if (!port) 555 pnp_register_port_resource(dev, option_flags, io->address, io->address,
468 return; 556 0, io->address_length, IORESOURCE_IO_FIXED);
469 port->min = port->max = io->address;
470 port->size = io->address_length;
471 port->align = 0;
472 port->flags = PNP_PORT_FLAG_FIXED;
473 pnp_register_port_resource(dev, option, port);
474} 557}
475 558
476static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev, 559static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev,
477 struct pnp_option *option, 560 unsigned int option_flags,
478 struct acpi_resource_memory24 *p) 561 struct acpi_resource_memory24 *p)
479{ 562{
480 struct pnp_mem *mem; 563 unsigned char flags = 0;
481 564
482 if (p->address_length == 0) 565 if (p->address_length == 0)
483 return; 566 return;
484 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
485 if (!mem)
486 return;
487 mem->min = p->minimum;
488 mem->max = p->maximum;
489 mem->align = p->alignment;
490 mem->size = p->address_length;
491
492 mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
493 IORESOURCE_MEM_WRITEABLE : 0;
494 567
495 pnp_register_mem_resource(dev, option, mem); 568 if (p->write_protect == ACPI_READ_WRITE_MEMORY)
569 flags = IORESOURCE_MEM_WRITEABLE;
570 pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
571 p->alignment, p->address_length, flags);
496} 572}
497 573
498static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev, 574static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev,
499 struct pnp_option *option, 575 unsigned int option_flags,
500 struct acpi_resource_memory32 *p) 576 struct acpi_resource_memory32 *p)
501{ 577{
502 struct pnp_mem *mem; 578 unsigned char flags = 0;
503 579
504 if (p->address_length == 0) 580 if (p->address_length == 0)
505 return; 581 return;
506 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
507 if (!mem)
508 return;
509 mem->min = p->minimum;
510 mem->max = p->maximum;
511 mem->align = p->alignment;
512 mem->size = p->address_length;
513
514 mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
515 IORESOURCE_MEM_WRITEABLE : 0;
516 582
517 pnp_register_mem_resource(dev, option, mem); 583 if (p->write_protect == ACPI_READ_WRITE_MEMORY)
584 flags = IORESOURCE_MEM_WRITEABLE;
585 pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
586 p->alignment, p->address_length, flags);
518} 587}
519 588
520static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev, 589static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev,
521 struct pnp_option *option, 590 unsigned int option_flags,
522 struct acpi_resource_fixed_memory32 *p) 591 struct acpi_resource_fixed_memory32 *p)
523{ 592{
524 struct pnp_mem *mem; 593 unsigned char flags = 0;
525 594
526 if (p->address_length == 0) 595 if (p->address_length == 0)
527 return; 596 return;
528 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
529 if (!mem)
530 return;
531 mem->min = mem->max = p->address;
532 mem->size = p->address_length;
533 mem->align = 0;
534
535 mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
536 IORESOURCE_MEM_WRITEABLE : 0;
537 597
538 pnp_register_mem_resource(dev, option, mem); 598 if (p->write_protect == ACPI_READ_WRITE_MEMORY)
599 flags = IORESOURCE_MEM_WRITEABLE;
600 pnp_register_mem_resource(dev, option_flags, p->address, p->address,
601 0, p->address_length, flags);
539} 602}
540 603
541static __init void pnpacpi_parse_address_option(struct pnp_dev *dev, 604static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
542 struct pnp_option *option, 605 unsigned int option_flags,
543 struct acpi_resource *r) 606 struct acpi_resource *r)
544{ 607{
545 struct acpi_resource_address64 addr, *p = &addr; 608 struct acpi_resource_address64 addr, *p = &addr;
546 acpi_status status; 609 acpi_status status;
547 struct pnp_mem *mem; 610 unsigned char flags = 0;
548 struct pnp_port *port;
549 611
550 status = acpi_resource_to_address64(r, p); 612 status = acpi_resource_to_address64(r, p);
551 if (!ACPI_SUCCESS(status)) { 613 if (!ACPI_SUCCESS(status)) {
@@ -558,49 +620,37 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
558 return; 620 return;
559 621
560 if (p->resource_type == ACPI_MEMORY_RANGE) { 622 if (p->resource_type == ACPI_MEMORY_RANGE) {
561 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); 623 if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
562 if (!mem) 624 flags = IORESOURCE_MEM_WRITEABLE;
563 return; 625 pnp_register_mem_resource(dev, option_flags, p->minimum,
564 mem->min = mem->max = p->minimum; 626 p->minimum, 0, p->address_length,
565 mem->size = p->address_length; 627 flags);
566 mem->align = 0; 628 } else if (p->resource_type == ACPI_IO_RANGE)
567 mem->flags = (p->info.mem.write_protect == 629 pnp_register_port_resource(dev, option_flags, p->minimum,
568 ACPI_READ_WRITE_MEMORY) ? IORESOURCE_MEM_WRITEABLE 630 p->minimum, 0, p->address_length,
569 : 0; 631 IORESOURCE_IO_FIXED);
570 pnp_register_mem_resource(dev, option, mem);
571 } else if (p->resource_type == ACPI_IO_RANGE) {
572 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
573 if (!port)
574 return;
575 port->min = port->max = p->minimum;
576 port->size = p->address_length;
577 port->align = 0;
578 port->flags = PNP_PORT_FLAG_FIXED;
579 pnp_register_port_resource(dev, option, port);
580 }
581} 632}
582 633
583struct acpipnp_parse_option_s { 634struct acpipnp_parse_option_s {
584 struct pnp_option *option;
585 struct pnp_option *option_independent;
586 struct pnp_dev *dev; 635 struct pnp_dev *dev;
636 unsigned int option_flags;
587}; 637};
588 638
589static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res, 639static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
590 void *data) 640 void *data)
591{ 641{
592 int priority = 0; 642 int priority;
593 struct acpipnp_parse_option_s *parse_data = data; 643 struct acpipnp_parse_option_s *parse_data = data;
594 struct pnp_dev *dev = parse_data->dev; 644 struct pnp_dev *dev = parse_data->dev;
595 struct pnp_option *option = parse_data->option; 645 unsigned int option_flags = parse_data->option_flags;
596 646
597 switch (res->type) { 647 switch (res->type) {
598 case ACPI_RESOURCE_TYPE_IRQ: 648 case ACPI_RESOURCE_TYPE_IRQ:
599 pnpacpi_parse_irq_option(dev, option, &res->data.irq); 649 pnpacpi_parse_irq_option(dev, option_flags, &res->data.irq);
600 break; 650 break;
601 651
602 case ACPI_RESOURCE_TYPE_DMA: 652 case ACPI_RESOURCE_TYPE_DMA:
603 pnpacpi_parse_dma_option(dev, option, &res->data.dma); 653 pnpacpi_parse_dma_option(dev, option_flags, &res->data.dma);
604 break; 654 break;
605 655
606 case ACPI_RESOURCE_TYPE_START_DEPENDENT: 656 case ACPI_RESOURCE_TYPE_START_DEPENDENT:
@@ -620,31 +670,19 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
620 priority = PNP_RES_PRIORITY_INVALID; 670 priority = PNP_RES_PRIORITY_INVALID;
621 break; 671 break;
622 } 672 }
623 /* TBD: Consider performance/robustness bits */ 673 parse_data->option_flags = pnp_new_dependent_set(dev, priority);
624 option = pnp_register_dependent_option(dev, priority);
625 if (!option)
626 return AE_ERROR;
627 parse_data->option = option;
628 break; 674 break;
629 675
630 case ACPI_RESOURCE_TYPE_END_DEPENDENT: 676 case ACPI_RESOURCE_TYPE_END_DEPENDENT:
631 /*only one EndDependentFn is allowed */ 677 parse_data->option_flags = 0;
632 if (!parse_data->option_independent) {
633 dev_warn(&dev->dev, "more than one EndDependentFn "
634 "in _PRS\n");
635 return AE_ERROR;
636 }
637 parse_data->option = parse_data->option_independent;
638 parse_data->option_independent = NULL;
639 dev_dbg(&dev->dev, "end dependent options\n");
640 break; 678 break;
641 679
642 case ACPI_RESOURCE_TYPE_IO: 680 case ACPI_RESOURCE_TYPE_IO:
643 pnpacpi_parse_port_option(dev, option, &res->data.io); 681 pnpacpi_parse_port_option(dev, option_flags, &res->data.io);
644 break; 682 break;
645 683
646 case ACPI_RESOURCE_TYPE_FIXED_IO: 684 case ACPI_RESOURCE_TYPE_FIXED_IO:
647 pnpacpi_parse_fixed_port_option(dev, option, 685 pnpacpi_parse_fixed_port_option(dev, option_flags,
648 &res->data.fixed_io); 686 &res->data.fixed_io);
649 break; 687 break;
650 688
@@ -653,29 +691,31 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
653 break; 691 break;
654 692
655 case ACPI_RESOURCE_TYPE_MEMORY24: 693 case ACPI_RESOURCE_TYPE_MEMORY24:
656 pnpacpi_parse_mem24_option(dev, option, &res->data.memory24); 694 pnpacpi_parse_mem24_option(dev, option_flags,
695 &res->data.memory24);
657 break; 696 break;
658 697
659 case ACPI_RESOURCE_TYPE_MEMORY32: 698 case ACPI_RESOURCE_TYPE_MEMORY32:
660 pnpacpi_parse_mem32_option(dev, option, &res->data.memory32); 699 pnpacpi_parse_mem32_option(dev, option_flags,
700 &res->data.memory32);
661 break; 701 break;
662 702
663 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: 703 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
664 pnpacpi_parse_fixed_mem32_option(dev, option, 704 pnpacpi_parse_fixed_mem32_option(dev, option_flags,
665 &res->data.fixed_memory32); 705 &res->data.fixed_memory32);
666 break; 706 break;
667 707
668 case ACPI_RESOURCE_TYPE_ADDRESS16: 708 case ACPI_RESOURCE_TYPE_ADDRESS16:
669 case ACPI_RESOURCE_TYPE_ADDRESS32: 709 case ACPI_RESOURCE_TYPE_ADDRESS32:
670 case ACPI_RESOURCE_TYPE_ADDRESS64: 710 case ACPI_RESOURCE_TYPE_ADDRESS64:
671 pnpacpi_parse_address_option(dev, option, res); 711 pnpacpi_parse_address_option(dev, option_flags, res);
672 break; 712 break;
673 713
674 case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: 714 case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
675 break; 715 break;
676 716
677 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: 717 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
678 pnpacpi_parse_ext_irq_option(dev, option, 718 pnpacpi_parse_ext_irq_option(dev, option_flags,
679 &res->data.extended_irq); 719 &res->data.extended_irq);
680 break; 720 break;
681 721
@@ -699,12 +739,9 @@ int __init pnpacpi_parse_resource_option_data(struct pnp_dev *dev)
699 739
700 dev_dbg(&dev->dev, "parse resource options\n"); 740 dev_dbg(&dev->dev, "parse resource options\n");
701 741
702 parse_data.option = pnp_register_independent_option(dev);
703 if (!parse_data.option)
704 return -ENOMEM;
705
706 parse_data.option_independent = parse_data.option;
707 parse_data.dev = dev; 742 parse_data.dev = dev;
743 parse_data.option_flags = 0;
744
708 status = acpi_walk_resources(handle, METHOD_NAME__PRS, 745 status = acpi_walk_resources(handle, METHOD_NAME__PRS,
709 pnpacpi_option_resource, &parse_data); 746 pnpacpi_option_resource, &parse_data);
710 747
@@ -806,6 +843,13 @@ static void pnpacpi_encode_irq(struct pnp_dev *dev,
806 struct acpi_resource_irq *irq = &resource->data.irq; 843 struct acpi_resource_irq *irq = &resource->data.irq;
807 int triggering, polarity, shareable; 844 int triggering, polarity, shareable;
808 845
846 if (!pnp_resource_enabled(p)) {
847 irq->interrupt_count = 0;
848 dev_dbg(&dev->dev, " encode irq (%s)\n",
849 p ? "disabled" : "missing");
850 return;
851 }
852
809 decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable); 853 decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable);
810 irq->triggering = triggering; 854 irq->triggering = triggering;
811 irq->polarity = polarity; 855 irq->polarity = polarity;
@@ -828,6 +872,13 @@ static void pnpacpi_encode_ext_irq(struct pnp_dev *dev,
828 struct acpi_resource_extended_irq *extended_irq = &resource->data.extended_irq; 872 struct acpi_resource_extended_irq *extended_irq = &resource->data.extended_irq;
829 int triggering, polarity, shareable; 873 int triggering, polarity, shareable;
830 874
875 if (!pnp_resource_enabled(p)) {
876 extended_irq->interrupt_count = 0;
877 dev_dbg(&dev->dev, " encode extended irq (%s)\n",
878 p ? "disabled" : "missing");
879 return;
880 }
881
831 decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable); 882 decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable);
832 extended_irq->producer_consumer = ACPI_CONSUMER; 883 extended_irq->producer_consumer = ACPI_CONSUMER;
833 extended_irq->triggering = triggering; 884 extended_irq->triggering = triggering;
@@ -848,6 +899,13 @@ static void pnpacpi_encode_dma(struct pnp_dev *dev,
848{ 899{
849 struct acpi_resource_dma *dma = &resource->data.dma; 900 struct acpi_resource_dma *dma = &resource->data.dma;
850 901
902 if (!pnp_resource_enabled(p)) {
903 dma->channel_count = 0;
904 dev_dbg(&dev->dev, " encode dma (%s)\n",
905 p ? "disabled" : "missing");
906 return;
907 }
908
851 /* Note: pnp_assign_dma will copy pnp_dma->flags into p->flags */ 909 /* Note: pnp_assign_dma will copy pnp_dma->flags into p->flags */
852 switch (p->flags & IORESOURCE_DMA_SPEED_MASK) { 910 switch (p->flags & IORESOURCE_DMA_SPEED_MASK) {
853 case IORESOURCE_DMA_TYPEA: 911 case IORESOURCE_DMA_TYPEA:
@@ -889,17 +947,21 @@ static void pnpacpi_encode_io(struct pnp_dev *dev,
889{ 947{
890 struct acpi_resource_io *io = &resource->data.io; 948 struct acpi_resource_io *io = &resource->data.io;
891 949
892 /* Note: pnp_assign_port will copy pnp_port->flags into p->flags */ 950 if (pnp_resource_enabled(p)) {
893 io->io_decode = (p->flags & PNP_PORT_FLAG_16BITADDR) ? 951 /* Note: pnp_assign_port copies pnp_port->flags into p->flags */
894 ACPI_DECODE_16 : ACPI_DECODE_10; 952 io->io_decode = (p->flags & IORESOURCE_IO_16BIT_ADDR) ?
895 io->minimum = p->start; 953 ACPI_DECODE_16 : ACPI_DECODE_10;
896 io->maximum = p->end; 954 io->minimum = p->start;
897 io->alignment = 0; /* Correct? */ 955 io->maximum = p->end;
898 io->address_length = p->end - p->start + 1; 956 io->alignment = 0; /* Correct? */
899 957 io->address_length = p->end - p->start + 1;
900 dev_dbg(&dev->dev, " encode io %#llx-%#llx decode %#x\n", 958 } else {
901 (unsigned long long) p->start, (unsigned long long) p->end, 959 io->minimum = 0;
902 io->io_decode); 960 io->address_length = 0;
961 }
962
963 dev_dbg(&dev->dev, " encode io %#x-%#x decode %#x\n", io->minimum,
964 io->minimum + io->address_length - 1, io->io_decode);
903} 965}
904 966
905static void pnpacpi_encode_fixed_io(struct pnp_dev *dev, 967static void pnpacpi_encode_fixed_io(struct pnp_dev *dev,
@@ -908,11 +970,16 @@ static void pnpacpi_encode_fixed_io(struct pnp_dev *dev,
908{ 970{
909 struct acpi_resource_fixed_io *fixed_io = &resource->data.fixed_io; 971 struct acpi_resource_fixed_io *fixed_io = &resource->data.fixed_io;
910 972
911 fixed_io->address = p->start; 973 if (pnp_resource_enabled(p)) {
912 fixed_io->address_length = p->end - p->start + 1; 974 fixed_io->address = p->start;
975 fixed_io->address_length = p->end - p->start + 1;
976 } else {
977 fixed_io->address = 0;
978 fixed_io->address_length = 0;
979 }
913 980
914 dev_dbg(&dev->dev, " encode fixed_io %#llx-%#llx\n", 981 dev_dbg(&dev->dev, " encode fixed_io %#x-%#x\n", fixed_io->address,
915 (unsigned long long) p->start, (unsigned long long) p->end); 982 fixed_io->address + fixed_io->address_length - 1);
916} 983}
917 984
918static void pnpacpi_encode_mem24(struct pnp_dev *dev, 985static void pnpacpi_encode_mem24(struct pnp_dev *dev,
@@ -921,17 +988,22 @@ static void pnpacpi_encode_mem24(struct pnp_dev *dev,
921{ 988{
922 struct acpi_resource_memory24 *memory24 = &resource->data.memory24; 989 struct acpi_resource_memory24 *memory24 = &resource->data.memory24;
923 990
924 /* Note: pnp_assign_mem will copy pnp_mem->flags into p->flags */ 991 if (pnp_resource_enabled(p)) {
925 memory24->write_protect = 992 /* Note: pnp_assign_mem copies pnp_mem->flags into p->flags */
926 (p->flags & IORESOURCE_MEM_WRITEABLE) ? 993 memory24->write_protect = p->flags & IORESOURCE_MEM_WRITEABLE ?
927 ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; 994 ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
928 memory24->minimum = p->start; 995 memory24->minimum = p->start;
929 memory24->maximum = p->end; 996 memory24->maximum = p->end;
930 memory24->alignment = 0; 997 memory24->alignment = 0;
931 memory24->address_length = p->end - p->start + 1; 998 memory24->address_length = p->end - p->start + 1;
932 999 } else {
933 dev_dbg(&dev->dev, " encode mem24 %#llx-%#llx write_protect %#x\n", 1000 memory24->minimum = 0;
934 (unsigned long long) p->start, (unsigned long long) p->end, 1001 memory24->address_length = 0;
1002 }
1003
1004 dev_dbg(&dev->dev, " encode mem24 %#x-%#x write_protect %#x\n",
1005 memory24->minimum,
1006 memory24->minimum + memory24->address_length - 1,
935 memory24->write_protect); 1007 memory24->write_protect);
936} 1008}
937 1009
@@ -941,16 +1013,21 @@ static void pnpacpi_encode_mem32(struct pnp_dev *dev,
941{ 1013{
942 struct acpi_resource_memory32 *memory32 = &resource->data.memory32; 1014 struct acpi_resource_memory32 *memory32 = &resource->data.memory32;
943 1015
944 memory32->write_protect = 1016 if (pnp_resource_enabled(p)) {
945 (p->flags & IORESOURCE_MEM_WRITEABLE) ? 1017 memory32->write_protect = p->flags & IORESOURCE_MEM_WRITEABLE ?
946 ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; 1018 ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
947 memory32->minimum = p->start; 1019 memory32->minimum = p->start;
948 memory32->maximum = p->end; 1020 memory32->maximum = p->end;
949 memory32->alignment = 0; 1021 memory32->alignment = 0;
950 memory32->address_length = p->end - p->start + 1; 1022 memory32->address_length = p->end - p->start + 1;
1023 } else {
1024 memory32->minimum = 0;
1025 memory32->alignment = 0;
1026 }
951 1027
952 dev_dbg(&dev->dev, " encode mem32 %#llx-%#llx write_protect %#x\n", 1028 dev_dbg(&dev->dev, " encode mem32 %#x-%#x write_protect %#x\n",
953 (unsigned long long) p->start, (unsigned long long) p->end, 1029 memory32->minimum,
1030 memory32->minimum + memory32->address_length - 1,
954 memory32->write_protect); 1031 memory32->write_protect);
955} 1032}
956 1033
@@ -960,15 +1037,20 @@ static void pnpacpi_encode_fixed_mem32(struct pnp_dev *dev,
960{ 1037{
961 struct acpi_resource_fixed_memory32 *fixed_memory32 = &resource->data.fixed_memory32; 1038 struct acpi_resource_fixed_memory32 *fixed_memory32 = &resource->data.fixed_memory32;
962 1039
963 fixed_memory32->write_protect = 1040 if (pnp_resource_enabled(p)) {
964 (p->flags & IORESOURCE_MEM_WRITEABLE) ? 1041 fixed_memory32->write_protect =
965 ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; 1042 p->flags & IORESOURCE_MEM_WRITEABLE ?
966 fixed_memory32->address = p->start; 1043 ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
967 fixed_memory32->address_length = p->end - p->start + 1; 1044 fixed_memory32->address = p->start;
1045 fixed_memory32->address_length = p->end - p->start + 1;
1046 } else {
1047 fixed_memory32->address = 0;
1048 fixed_memory32->address_length = 0;
1049 }
968 1050
969 dev_dbg(&dev->dev, " encode fixed_mem32 %#llx-%#llx " 1051 dev_dbg(&dev->dev, " encode fixed_mem32 %#x-%#x write_protect %#x\n",
970 "write_protect %#x\n", 1052 fixed_memory32->address,
971 (unsigned long long) p->start, (unsigned long long) p->end, 1053 fixed_memory32->address + fixed_memory32->address_length - 1,
972 fixed_memory32->write_protect); 1054 fixed_memory32->write_protect);
973} 1055}
974 1056
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index 5ff9a4c0447e..ca567671379e 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -216,137 +216,116 @@ len_err:
216 216
217static __init void pnpbios_parse_mem_option(struct pnp_dev *dev, 217static __init void pnpbios_parse_mem_option(struct pnp_dev *dev,
218 unsigned char *p, int size, 218 unsigned char *p, int size,
219 struct pnp_option *option) 219 unsigned int option_flags)
220{ 220{
221 struct pnp_mem *mem; 221 resource_size_t min, max, align, len;
222 222 unsigned char flags;
223 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); 223
224 if (!mem) 224 min = ((p[5] << 8) | p[4]) << 8;
225 return; 225 max = ((p[7] << 8) | p[6]) << 8;
226 mem->min = ((p[5] << 8) | p[4]) << 8; 226 align = (p[9] << 8) | p[8];
227 mem->max = ((p[7] << 8) | p[6]) << 8; 227 len = ((p[11] << 8) | p[10]) << 8;
228 mem->align = (p[9] << 8) | p[8]; 228 flags = p[3];
229 mem->size = ((p[11] << 8) | p[10]) << 8; 229 pnp_register_mem_resource(dev, option_flags, min, max, align, len,
230 mem->flags = p[3]; 230 flags);
231 pnp_register_mem_resource(dev, option, mem);
232} 231}
233 232
234static __init void pnpbios_parse_mem32_option(struct pnp_dev *dev, 233static __init void pnpbios_parse_mem32_option(struct pnp_dev *dev,
235 unsigned char *p, int size, 234 unsigned char *p, int size,
236 struct pnp_option *option) 235 unsigned int option_flags)
237{ 236{
238 struct pnp_mem *mem; 237 resource_size_t min, max, align, len;
239 238 unsigned char flags;
240 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); 239
241 if (!mem) 240 min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
242 return; 241 max = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8];
243 mem->min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; 242 align = (p[15] << 24) | (p[14] << 16) | (p[13] << 8) | p[12];
244 mem->max = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8]; 243 len = (p[19] << 24) | (p[18] << 16) | (p[17] << 8) | p[16];
245 mem->align = (p[15] << 24) | (p[14] << 16) | (p[13] << 8) | p[12]; 244 flags = p[3];
246 mem->size = (p[19] << 24) | (p[18] << 16) | (p[17] << 8) | p[16]; 245 pnp_register_mem_resource(dev, option_flags, min, max, align, len,
247 mem->flags = p[3]; 246 flags);
248 pnp_register_mem_resource(dev, option, mem);
249} 247}
250 248
251static __init void pnpbios_parse_fixed_mem32_option(struct pnp_dev *dev, 249static __init void pnpbios_parse_fixed_mem32_option(struct pnp_dev *dev,
252 unsigned char *p, int size, 250 unsigned char *p, int size,
253 struct pnp_option *option) 251 unsigned int option_flags)
254{ 252{
255 struct pnp_mem *mem; 253 resource_size_t base, len;
256 254 unsigned char flags;
257 mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); 255
258 if (!mem) 256 base = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
259 return; 257 len = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8];
260 mem->min = mem->max = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; 258 flags = p[3];
261 mem->size = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8]; 259 pnp_register_mem_resource(dev, option_flags, base, base, 0, len, flags);
262 mem->align = 0;
263 mem->flags = p[3];
264 pnp_register_mem_resource(dev, option, mem);
265} 260}
266 261
267static __init void pnpbios_parse_irq_option(struct pnp_dev *dev, 262static __init void pnpbios_parse_irq_option(struct pnp_dev *dev,
268 unsigned char *p, int size, 263 unsigned char *p, int size,
269 struct pnp_option *option) 264 unsigned int option_flags)
270{ 265{
271 struct pnp_irq *irq;
272 unsigned long bits; 266 unsigned long bits;
267 pnp_irq_mask_t map;
268 unsigned char flags = IORESOURCE_IRQ_HIGHEDGE;
273 269
274 irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL);
275 if (!irq)
276 return;
277 bits = (p[2] << 8) | p[1]; 270 bits = (p[2] << 8) | p[1];
278 bitmap_copy(irq->map, &bits, 16); 271
272 bitmap_zero(map.bits, PNP_IRQ_NR);
273 bitmap_copy(map.bits, &bits, 16);
274
279 if (size > 2) 275 if (size > 2)
280 irq->flags = p[3]; 276 flags = p[3];
281 else 277
282 irq->flags = IORESOURCE_IRQ_HIGHEDGE; 278 pnp_register_irq_resource(dev, option_flags, &map, flags);
283 pnp_register_irq_resource(dev, option, irq);
284} 279}
285 280
286static __init void pnpbios_parse_dma_option(struct pnp_dev *dev, 281static __init void pnpbios_parse_dma_option(struct pnp_dev *dev,
287 unsigned char *p, int size, 282 unsigned char *p, int size,
288 struct pnp_option *option) 283 unsigned int option_flags)
289{ 284{
290 struct pnp_dma *dma; 285 pnp_register_dma_resource(dev, option_flags, p[1], p[2]);
291
292 dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL);
293 if (!dma)
294 return;
295 dma->map = p[1];
296 dma->flags = p[2];
297 pnp_register_dma_resource(dev, option, dma);
298} 286}
299 287
300static __init void pnpbios_parse_port_option(struct pnp_dev *dev, 288static __init void pnpbios_parse_port_option(struct pnp_dev *dev,
301 unsigned char *p, int size, 289 unsigned char *p, int size,
302 struct pnp_option *option) 290 unsigned int option_flags)
303{ 291{
304 struct pnp_port *port; 292 resource_size_t min, max, align, len;
305 293 unsigned char flags;
306 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); 294
307 if (!port) 295 min = (p[3] << 8) | p[2];
308 return; 296 max = (p[5] << 8) | p[4];
309 port->min = (p[3] << 8) | p[2]; 297 align = p[6];
310 port->max = (p[5] << 8) | p[4]; 298 len = p[7];
311 port->align = p[6]; 299 flags = p[1] ? IORESOURCE_IO_16BIT_ADDR : 0;
312 port->size = p[7]; 300 pnp_register_port_resource(dev, option_flags, min, max, align, len,
313 port->flags = p[1] ? PNP_PORT_FLAG_16BITADDR : 0; 301 flags);
314 pnp_register_port_resource(dev, option, port);
315} 302}
316 303
317static __init void pnpbios_parse_fixed_port_option(struct pnp_dev *dev, 304static __init void pnpbios_parse_fixed_port_option(struct pnp_dev *dev,
318 unsigned char *p, int size, 305 unsigned char *p, int size,
319 struct pnp_option *option) 306 unsigned int option_flags)
320{ 307{
321 struct pnp_port *port; 308 resource_size_t base, len;
322 309
323 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); 310 base = (p[2] << 8) | p[1];
324 if (!port) 311 len = p[3];
325 return; 312 pnp_register_port_resource(dev, option_flags, base, base, 0, len,
326 port->min = port->max = (p[2] << 8) | p[1]; 313 IORESOURCE_IO_FIXED);
327 port->size = p[3];
328 port->align = 0;
329 port->flags = PNP_PORT_FLAG_FIXED;
330 pnp_register_port_resource(dev, option, port);
331} 314}
332 315
333static __init unsigned char * 316static __init unsigned char *
334pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end, 317pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
335 struct pnp_dev *dev) 318 struct pnp_dev *dev)
336{ 319{
337 unsigned int len, tag; 320 unsigned int len, tag;
338 int priority = 0; 321 int priority;
339 struct pnp_option *option, *option_independent; 322 unsigned int option_flags;
340 323
341 if (!p) 324 if (!p)
342 return NULL; 325 return NULL;
343 326
344 dev_dbg(&dev->dev, "parse resource options\n"); 327 dev_dbg(&dev->dev, "parse resource options\n");
345 328 option_flags = 0;
346 option_independent = option = pnp_register_independent_option(dev);
347 if (!option)
348 return NULL;
349
350 while ((char *)p < (char *)end) { 329 while ((char *)p < (char *)end) {
351 330
352 /* determine the type of tag */ 331 /* determine the type of tag */
@@ -363,37 +342,38 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
363 case LARGE_TAG_MEM: 342 case LARGE_TAG_MEM:
364 if (len != 9) 343 if (len != 9)
365 goto len_err; 344 goto len_err;
366 pnpbios_parse_mem_option(dev, p, len, option); 345 pnpbios_parse_mem_option(dev, p, len, option_flags);
367 break; 346 break;
368 347
369 case LARGE_TAG_MEM32: 348 case LARGE_TAG_MEM32:
370 if (len != 17) 349 if (len != 17)
371 goto len_err; 350 goto len_err;
372 pnpbios_parse_mem32_option(dev, p, len, option); 351 pnpbios_parse_mem32_option(dev, p, len, option_flags);
373 break; 352 break;
374 353
375 case LARGE_TAG_FIXEDMEM32: 354 case LARGE_TAG_FIXEDMEM32:
376 if (len != 9) 355 if (len != 9)
377 goto len_err; 356 goto len_err;
378 pnpbios_parse_fixed_mem32_option(dev, p, len, option); 357 pnpbios_parse_fixed_mem32_option(dev, p, len,
358 option_flags);
379 break; 359 break;
380 360
381 case SMALL_TAG_IRQ: 361 case SMALL_TAG_IRQ:
382 if (len < 2 || len > 3) 362 if (len < 2 || len > 3)
383 goto len_err; 363 goto len_err;
384 pnpbios_parse_irq_option(dev, p, len, option); 364 pnpbios_parse_irq_option(dev, p, len, option_flags);
385 break; 365 break;
386 366
387 case SMALL_TAG_DMA: 367 case SMALL_TAG_DMA:
388 if (len != 2) 368 if (len != 2)
389 goto len_err; 369 goto len_err;
390 pnpbios_parse_dma_option(dev, p, len, option); 370 pnpbios_parse_dma_option(dev, p, len, option_flags);
391 break; 371 break;
392 372
393 case SMALL_TAG_PORT: 373 case SMALL_TAG_PORT:
394 if (len != 7) 374 if (len != 7)
395 goto len_err; 375 goto len_err;
396 pnpbios_parse_port_option(dev, p, len, option); 376 pnpbios_parse_port_option(dev, p, len, option_flags);
397 break; 377 break;
398 378
399 case SMALL_TAG_VENDOR: 379 case SMALL_TAG_VENDOR:
@@ -403,28 +383,23 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
403 case SMALL_TAG_FIXEDPORT: 383 case SMALL_TAG_FIXEDPORT:
404 if (len != 3) 384 if (len != 3)
405 goto len_err; 385 goto len_err;
406 pnpbios_parse_fixed_port_option(dev, p, len, option); 386 pnpbios_parse_fixed_port_option(dev, p, len,
387 option_flags);
407 break; 388 break;
408 389
409 case SMALL_TAG_STARTDEP: 390 case SMALL_TAG_STARTDEP:
410 if (len > 1) 391 if (len > 1)
411 goto len_err; 392 goto len_err;
412 priority = 0x100 | PNP_RES_PRIORITY_ACCEPTABLE; 393 priority = PNP_RES_PRIORITY_ACCEPTABLE;
413 if (len > 0) 394 if (len > 0)
414 priority = 0x100 | p[1]; 395 priority = p[1];
415 option = pnp_register_dependent_option(dev, priority); 396 option_flags = pnp_new_dependent_set(dev, priority);
416 if (!option)
417 return NULL;
418 break; 397 break;
419 398
420 case SMALL_TAG_ENDDEP: 399 case SMALL_TAG_ENDDEP:
421 if (len != 0) 400 if (len != 0)
422 goto len_err; 401 goto len_err;
423 if (option_independent == option) 402 option_flags = 0;
424 dev_warn(&dev->dev, "missing "
425 "SMALL_TAG_STARTDEP tag\n");
426 option = option_independent;
427 dev_dbg(&dev->dev, "end dependent options\n");
428 break; 403 break;
429 404
430 case SMALL_TAG_END: 405 case SMALL_TAG_END:
@@ -526,8 +501,16 @@ len_err:
526static void pnpbios_encode_mem(struct pnp_dev *dev, unsigned char *p, 501static void pnpbios_encode_mem(struct pnp_dev *dev, unsigned char *p,
527 struct resource *res) 502 struct resource *res)
528{ 503{
529 unsigned long base = res->start; 504 unsigned long base;
530 unsigned long len = res->end - res->start + 1; 505 unsigned long len;
506
507 if (pnp_resource_enabled(res)) {
508 base = res->start;
509 len = res->end - res->start + 1;
510 } else {
511 base = 0;
512 len = 0;
513 }
531 514
532 p[4] = (base >> 8) & 0xff; 515 p[4] = (base >> 8) & 0xff;
533 p[5] = ((base >> 8) >> 8) & 0xff; 516 p[5] = ((base >> 8) >> 8) & 0xff;
@@ -536,15 +519,22 @@ static void pnpbios_encode_mem(struct pnp_dev *dev, unsigned char *p,
536 p[10] = (len >> 8) & 0xff; 519 p[10] = (len >> 8) & 0xff;
537 p[11] = ((len >> 8) >> 8) & 0xff; 520 p[11] = ((len >> 8) >> 8) & 0xff;
538 521
539 dev_dbg(&dev->dev, " encode mem %#llx-%#llx\n", 522 dev_dbg(&dev->dev, " encode mem %#lx-%#lx\n", base, base + len - 1);
540 (unsigned long long) res->start, (unsigned long long) res->end);
541} 523}
542 524
543static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p, 525static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p,
544 struct resource *res) 526 struct resource *res)
545{ 527{
546 unsigned long base = res->start; 528 unsigned long base;
547 unsigned long len = res->end - res->start + 1; 529 unsigned long len;
530
531 if (pnp_resource_enabled(res)) {
532 base = res->start;
533 len = res->end - res->start + 1;
534 } else {
535 base = 0;
536 len = 0;
537 }
548 538
549 p[4] = base & 0xff; 539 p[4] = base & 0xff;
550 p[5] = (base >> 8) & 0xff; 540 p[5] = (base >> 8) & 0xff;
@@ -559,15 +549,22 @@ static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p,
559 p[18] = (len >> 16) & 0xff; 549 p[18] = (len >> 16) & 0xff;
560 p[19] = (len >> 24) & 0xff; 550 p[19] = (len >> 24) & 0xff;
561 551
562 dev_dbg(&dev->dev, " encode mem32 %#llx-%#llx\n", 552 dev_dbg(&dev->dev, " encode mem32 %#lx-%#lx\n", base, base + len - 1);
563 (unsigned long long) res->start, (unsigned long long) res->end);
564} 553}
565 554
566static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p, 555static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p,
567 struct resource *res) 556 struct resource *res)
568{ 557{
569 unsigned long base = res->start; 558 unsigned long base;
570 unsigned long len = res->end - res->start + 1; 559 unsigned long len;
560
561 if (pnp_resource_enabled(res)) {
562 base = res->start;
563 len = res->end - res->start + 1;
564 } else {
565 base = 0;
566 len = 0;
567 }
571 568
572 p[4] = base & 0xff; 569 p[4] = base & 0xff;
573 p[5] = (base >> 8) & 0xff; 570 p[5] = (base >> 8) & 0xff;
@@ -578,40 +575,54 @@ static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p,
578 p[10] = (len >> 16) & 0xff; 575 p[10] = (len >> 16) & 0xff;
579 p[11] = (len >> 24) & 0xff; 576 p[11] = (len >> 24) & 0xff;
580 577
581 dev_dbg(&dev->dev, " encode fixed_mem32 %#llx-%#llx\n", 578 dev_dbg(&dev->dev, " encode fixed_mem32 %#lx-%#lx\n", base,
582 (unsigned long long) res->start, (unsigned long long) res->end); 579 base + len - 1);
583} 580}
584 581
585static void pnpbios_encode_irq(struct pnp_dev *dev, unsigned char *p, 582static void pnpbios_encode_irq(struct pnp_dev *dev, unsigned char *p,
586 struct resource *res) 583 struct resource *res)
587{ 584{
588 unsigned long map = 0; 585 unsigned long map;
586
587 if (pnp_resource_enabled(res))
588 map = 1 << res->start;
589 else
590 map = 0;
589 591
590 map = 1 << res->start;
591 p[1] = map & 0xff; 592 p[1] = map & 0xff;
592 p[2] = (map >> 8) & 0xff; 593 p[2] = (map >> 8) & 0xff;
593 594
594 dev_dbg(&dev->dev, " encode irq %llu\n", 595 dev_dbg(&dev->dev, " encode irq mask %#lx\n", map);
595 (unsigned long long)res->start);
596} 596}
597 597
598static void pnpbios_encode_dma(struct pnp_dev *dev, unsigned char *p, 598static void pnpbios_encode_dma(struct pnp_dev *dev, unsigned char *p,
599 struct resource *res) 599 struct resource *res)
600{ 600{
601 unsigned long map = 0; 601 unsigned long map;
602
603 if (pnp_resource_enabled(res))
604 map = 1 << res->start;
605 else
606 map = 0;
602 607
603 map = 1 << res->start;
604 p[1] = map & 0xff; 608 p[1] = map & 0xff;
605 609
606 dev_dbg(&dev->dev, " encode dma %llu\n", 610 dev_dbg(&dev->dev, " encode dma mask %#lx\n", map);
607 (unsigned long long)res->start);
608} 611}
609 612
610static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p, 613static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p,
611 struct resource *res) 614 struct resource *res)
612{ 615{
613 unsigned long base = res->start; 616 unsigned long base;
614 unsigned long len = res->end - res->start + 1; 617 unsigned long len;
618
619 if (pnp_resource_enabled(res)) {
620 base = res->start;
621 len = res->end - res->start + 1;
622 } else {
623 base = 0;
624 len = 0;
625 }
615 626
616 p[2] = base & 0xff; 627 p[2] = base & 0xff;
617 p[3] = (base >> 8) & 0xff; 628 p[3] = (base >> 8) & 0xff;
@@ -619,8 +630,7 @@ static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p,
619 p[5] = (base >> 8) & 0xff; 630 p[5] = (base >> 8) & 0xff;
620 p[7] = len & 0xff; 631 p[7] = len & 0xff;
621 632
622 dev_dbg(&dev->dev, " encode io %#llx-%#llx\n", 633 dev_dbg(&dev->dev, " encode io %#lx-%#lx\n", base, base + len - 1);
623 (unsigned long long) res->start, (unsigned long long) res->end);
624} 634}
625 635
626static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p, 636static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p,
@@ -629,12 +639,20 @@ static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p,
629 unsigned long base = res->start; 639 unsigned long base = res->start;
630 unsigned long len = res->end - res->start + 1; 640 unsigned long len = res->end - res->start + 1;
631 641
642 if (pnp_resource_enabled(res)) {
643 base = res->start;
644 len = res->end - res->start + 1;
645 } else {
646 base = 0;
647 len = 0;
648 }
649
632 p[1] = base & 0xff; 650 p[1] = base & 0xff;
633 p[2] = (base >> 8) & 0xff; 651 p[2] = (base >> 8) & 0xff;
634 p[3] = len & 0xff; 652 p[3] = len & 0xff;
635 653
636 dev_dbg(&dev->dev, " encode fixed_io %#llx-%#llx\n", 654 dev_dbg(&dev->dev, " encode fixed_io %#lx-%#lx\n", base,
637 (unsigned long long) res->start, (unsigned long long) res->end); 655 base + len - 1);
638} 656}
639 657
640static unsigned char *pnpbios_encode_allocated_resource_data(struct pnp_dev 658static unsigned char *pnpbios_encode_allocated_resource_data(struct pnp_dev
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 1ff3bb585ab2..55f55ed72dc7 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -5,6 +5,8 @@
5 * when building up the resource structure for the first time. 5 * when building up the resource structure for the first time.
6 * 6 *
7 * Copyright (c) 2000 Peter Denison <peterd@pnd-pc.demon.co.uk> 7 * Copyright (c) 2000 Peter Denison <peterd@pnd-pc.demon.co.uk>
8 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
9 * Bjorn Helgaas <bjorn.helgaas@hp.com>
8 * 10 *
9 * Heavily based on PCI quirks handling which is 11 * Heavily based on PCI quirks handling which is
10 * 12 *
@@ -20,203 +22,207 @@
20#include <linux/kallsyms.h> 22#include <linux/kallsyms.h>
21#include "base.h" 23#include "base.h"
22 24
25static void quirk_awe32_add_ports(struct pnp_dev *dev,
26 struct pnp_option *option,
27 unsigned int offset)
28{
29 struct pnp_option *new_option;
30
31 new_option = kmalloc(sizeof(struct pnp_option), GFP_KERNEL);
32 if (!new_option) {
33 dev_err(&dev->dev, "couldn't add ioport region to option set "
34 "%d\n", pnp_option_set(option));
35 return;
36 }
37
38 *new_option = *option;
39 new_option->u.port.min += offset;
40 new_option->u.port.max += offset;
41 list_add(&new_option->list, &option->list);
42
43 dev_info(&dev->dev, "added ioport region %#llx-%#llx to set %d\n",
44 (unsigned long long) new_option->u.port.min,
45 (unsigned long long) new_option->u.port.max,
46 pnp_option_set(option));
47}
48
23static void quirk_awe32_resources(struct pnp_dev *dev) 49static void quirk_awe32_resources(struct pnp_dev *dev)
24{ 50{
25 struct pnp_port *port, *port2, *port3; 51 struct pnp_option *option;
26 struct pnp_option *res = dev->dependent; 52 unsigned int set = ~0;
27 53
28 /* 54 /*
29 * Unfortunately the isapnp_add_port_resource is too tightly bound 55 * Add two extra ioport regions (at offset 0x400 and 0x800 from the
30 * into the PnP discovery sequence, and cannot be used. Link in the 56 * one given) to every dependent option set.
31 * two extra ports (at offset 0x400 and 0x800 from the one given) by
32 * hand.
33 */ 57 */
34 for (; res; res = res->next) { 58 list_for_each_entry(option, &dev->options, list) {
35 port2 = pnp_alloc(sizeof(struct pnp_port)); 59 if (pnp_option_is_dependent(option) &&
36 if (!port2) 60 pnp_option_set(option) != set) {
37 return; 61 set = pnp_option_set(option);
38 port3 = pnp_alloc(sizeof(struct pnp_port)); 62 quirk_awe32_add_ports(dev, option, 0x800);
39 if (!port3) { 63 quirk_awe32_add_ports(dev, option, 0x400);
40 kfree(port2);
41 return;
42 } 64 }
43 port = res->port;
44 memcpy(port2, port, sizeof(struct pnp_port));
45 memcpy(port3, port, sizeof(struct pnp_port));
46 port->next = port2;
47 port2->next = port3;
48 port2->min += 0x400;
49 port2->max += 0x400;
50 port3->min += 0x800;
51 port3->max += 0x800;
52 dev_info(&dev->dev,
53 "AWE32 quirk - added ioports 0x%lx and 0x%lx\n",
54 (unsigned long)port2->min,
55 (unsigned long)port3->min);
56 } 65 }
57} 66}
58 67
59static void quirk_cmi8330_resources(struct pnp_dev *dev) 68static void quirk_cmi8330_resources(struct pnp_dev *dev)
60{ 69{
61 struct pnp_option *res = dev->dependent; 70 struct pnp_option *option;
62 unsigned long tmp; 71 struct pnp_irq *irq;
63 72 struct pnp_dma *dma;
64 for (; res; res = res->next) {
65
66 struct pnp_irq *irq;
67 struct pnp_dma *dma;
68 73
69 for (irq = res->irq; irq; irq = irq->next) { // Valid irqs are 5, 7, 10 74 list_for_each_entry(option, &dev->options, list) {
70 tmp = 0x04A0; 75 if (!pnp_option_is_dependent(option))
71 bitmap_copy(irq->map, &tmp, 16); // 0000 0100 1010 0000 76 continue;
72 }
73 77
74 for (dma = res->dma; dma; dma = dma->next) // Valid 8bit dma channels are 1,3 78 if (option->type == IORESOURCE_IRQ) {
79 irq = &option->u.irq;
80 bitmap_zero(irq->map.bits, PNP_IRQ_NR);
81 __set_bit(5, irq->map.bits);
82 __set_bit(7, irq->map.bits);
83 __set_bit(10, irq->map.bits);
84 dev_info(&dev->dev, "set possible IRQs in "
85 "option set %d to 5, 7, 10\n",
86 pnp_option_set(option));
87 } else if (option->type == IORESOURCE_DMA) {
88 dma = &option->u.dma;
75 if ((dma->flags & IORESOURCE_DMA_TYPE_MASK) == 89 if ((dma->flags & IORESOURCE_DMA_TYPE_MASK) ==
76 IORESOURCE_DMA_8BIT) 90 IORESOURCE_DMA_8BIT &&
77 dma->map = 0x000A; 91 dma->map != 0x0A) {
92 dev_info(&dev->dev, "changing possible "
93 "DMA channel mask in option set %d "
94 "from %#02x to 0x0A (1, 3)\n",
95 pnp_option_set(option), dma->map);
96 dma->map = 0x0A;
97 }
98 }
78 } 99 }
79 dev_info(&dev->dev, "CMI8330 quirk - forced possible IRQs to 5, 7, 10 "
80 "and DMA channels to 1, 3\n");
81} 100}
82 101
83static void quirk_sb16audio_resources(struct pnp_dev *dev) 102static void quirk_sb16audio_resources(struct pnp_dev *dev)
84{ 103{
104 struct pnp_option *option;
105 unsigned int prev_option_flags = ~0, n = 0;
85 struct pnp_port *port; 106 struct pnp_port *port;
86 struct pnp_option *res = dev->dependent;
87 int changed = 0;
88 107
89 /* 108 /*
90 * The default range on the mpu port for these devices is 0x388-0x388. 109 * The default range on the OPL port for these devices is 0x388-0x388.
91 * Here we increase that range so that two such cards can be 110 * Here we increase that range so that two such cards can be
92 * auto-configured. 111 * auto-configured.
93 */ 112 */
113 list_for_each_entry(option, &dev->options, list) {
114 if (prev_option_flags != option->flags) {
115 prev_option_flags = option->flags;
116 n = 0;
117 }
94 118
95 for (; res; res = res->next) { 119 if (pnp_option_is_dependent(option) &&
96 port = res->port; 120 option->type == IORESOURCE_IO) {
97 if (!port) 121 n++;
98 continue; 122 port = &option->u.port;
99 port = port->next; 123 if (n == 3 && port->min == port->max) {
100 if (!port) 124 port->max += 0x70;
101 continue; 125 dev_info(&dev->dev, "increased option port "
102 port = port->next; 126 "range from %#llx-%#llx to "
103 if (!port) 127 "%#llx-%#llx\n",
104 continue; 128 (unsigned long long) port->min,
105 if (port->min != port->max) 129 (unsigned long long) port->min,
106 continue; 130 (unsigned long long) port->min,
107 port->max += 0x70; 131 (unsigned long long) port->max);
108 changed = 1; 132 }
133 }
109 } 134 }
110 if (changed)
111 dev_info(&dev->dev, "SB audio device quirk - increased port range\n");
112} 135}
113 136
114static struct pnp_option *quirk_isapnp_mpu_options(struct pnp_dev *dev) 137static struct pnp_option *pnp_clone_dependent_set(struct pnp_dev *dev,
138 unsigned int set)
115{ 139{
116 struct pnp_option *head = NULL; 140 struct pnp_option *tail = NULL, *first_new_option = NULL;
117 struct pnp_option *prev = NULL; 141 struct pnp_option *option, *new_option;
118 struct pnp_option *res; 142 unsigned int flags;
119
120 /*
121 * Build a functional IRQ-less variant of each MPU option.
122 */
123
124 for (res = dev->dependent; res; res = res->next) {
125 struct pnp_option *curr;
126 struct pnp_port *port;
127 struct pnp_port *copy;
128 143
129 port = res->port; 144 list_for_each_entry(option, &dev->options, list) {
130 if (!port || !res->irq) 145 if (pnp_option_is_dependent(option))
131 continue; 146 tail = option;
147 }
148 if (!tail) {
149 dev_err(&dev->dev, "no dependent option sets\n");
150 return NULL;
151 }
132 152
133 copy = pnp_alloc(sizeof *copy); 153 flags = pnp_new_dependent_set(dev, PNP_RES_PRIORITY_FUNCTIONAL);
134 if (!copy) 154 list_for_each_entry(option, &dev->options, list) {
135 break; 155 if (pnp_option_is_dependent(option) &&
156 pnp_option_set(option) == set) {
157 new_option = kmalloc(sizeof(struct pnp_option),
158 GFP_KERNEL);
159 if (!new_option) {
160 dev_err(&dev->dev, "couldn't clone dependent "
161 "set %d\n", set);
162 return NULL;
163 }
136 164
137 copy->min = port->min; 165 *new_option = *option;
138 copy->max = port->max; 166 new_option->flags = flags;
139 copy->align = port->align; 167 if (!first_new_option)
140 copy->size = port->size; 168 first_new_option = new_option;
141 copy->flags = port->flags;
142 169
143 curr = pnp_build_option(PNP_RES_PRIORITY_FUNCTIONAL); 170 list_add(&new_option->list, &tail->list);
144 if (!curr) { 171 tail = new_option;
145 kfree(copy);
146 break;
147 } 172 }
148 curr->port = copy;
149
150 if (prev)
151 prev->next = curr;
152 else
153 head = curr;
154 prev = curr;
155 } 173 }
156 if (head)
157 dev_info(&dev->dev, "adding IRQ-less MPU options\n");
158 174
159 return head; 175 return first_new_option;
160} 176}
161 177
162static void quirk_ad1815_mpu_resources(struct pnp_dev *dev) 178
179static void quirk_add_irq_optional_dependent_sets(struct pnp_dev *dev)
163{ 180{
164 struct pnp_option *res; 181 struct pnp_option *new_option;
182 unsigned int num_sets, i, set;
165 struct pnp_irq *irq; 183 struct pnp_irq *irq;
166 184
167 /* 185 num_sets = dev->num_dependent_sets;
168 * Distribute the independent IRQ over the dependent options 186 for (i = 0; i < num_sets; i++) {
169 */ 187 new_option = pnp_clone_dependent_set(dev, i);
170 188 if (!new_option)
171 res = dev->independent; 189 return;
172 if (!res)
173 return;
174
175 irq = res->irq;
176 if (!irq || irq->next)
177 return;
178
179 res = dev->dependent;
180 if (!res)
181 return;
182
183 while (1) {
184 struct pnp_irq *copy;
185
186 copy = pnp_alloc(sizeof *copy);
187 if (!copy)
188 break;
189
190 memcpy(copy->map, irq->map, sizeof copy->map);
191 copy->flags = irq->flags;
192 190
193 copy->next = res->irq; /* Yes, this is NULL */ 191 set = pnp_option_set(new_option);
194 res->irq = copy; 192 while (new_option && pnp_option_set(new_option) == set) {
193 if (new_option->type == IORESOURCE_IRQ) {
194 irq = &new_option->u.irq;
195 irq->flags |= IORESOURCE_IRQ_OPTIONAL;
196 }
197 dbg_pnp_show_option(dev, new_option);
198 new_option = list_entry(new_option->list.next,
199 struct pnp_option, list);
200 }
195 201
196 if (!res->next) 202 dev_info(&dev->dev, "added dependent option set %d (same as "
197 break; 203 "set %d except IRQ optional)\n", set, i);
198 res = res->next;
199 } 204 }
200 kfree(irq);
201
202 res->next = quirk_isapnp_mpu_options(dev);
203
204 res = dev->independent;
205 res->irq = NULL;
206} 205}
207 206
208static void quirk_isapnp_mpu_resources(struct pnp_dev *dev) 207static void quirk_ad1815_mpu_resources(struct pnp_dev *dev)
209{ 208{
210 struct pnp_option *res; 209 struct pnp_option *option;
210 struct pnp_irq *irq = NULL;
211 unsigned int independent_irqs = 0;
212
213 list_for_each_entry(option, &dev->options, list) {
214 if (option->type == IORESOURCE_IRQ &&
215 !pnp_option_is_dependent(option)) {
216 independent_irqs++;
217 irq = &option->u.irq;
218 }
219 }
211 220
212 res = dev->dependent; 221 if (independent_irqs != 1)
213 if (!res)
214 return; 222 return;
215 223
216 while (res->next) 224 irq->flags |= IORESOURCE_IRQ_OPTIONAL;
217 res = res->next; 225 dev_info(&dev->dev, "made independent IRQ optional\n");
218
219 res->next = quirk_isapnp_mpu_options(dev);
220} 226}
221 227
222#include <linux/pci.h> 228#include <linux/pci.h>
@@ -248,8 +254,7 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
248 for (j = 0; 254 for (j = 0;
249 (res = pnp_get_resource(dev, IORESOURCE_MEM, j)); 255 (res = pnp_get_resource(dev, IORESOURCE_MEM, j));
250 j++) { 256 j++) {
251 if (res->flags & IORESOURCE_UNSET || 257 if (res->start == 0 && res->end == 0)
252 (res->start == 0 && res->end == 0))
253 continue; 258 continue;
254 259
255 pnp_start = res->start; 260 pnp_start = res->start;
@@ -312,10 +317,10 @@ static struct pnp_fixup pnp_fixups[] = {
312 {"CTL0043", quirk_sb16audio_resources}, 317 {"CTL0043", quirk_sb16audio_resources},
313 {"CTL0044", quirk_sb16audio_resources}, 318 {"CTL0044", quirk_sb16audio_resources},
314 {"CTL0045", quirk_sb16audio_resources}, 319 {"CTL0045", quirk_sb16audio_resources},
315 /* Add IRQ-less MPU options */ 320 /* Add IRQ-optional MPU options */
316 {"ADS7151", quirk_ad1815_mpu_resources}, 321 {"ADS7151", quirk_ad1815_mpu_resources},
317 {"ADS7181", quirk_isapnp_mpu_resources}, 322 {"ADS7181", quirk_add_irq_optional_dependent_sets},
318 {"AZT0002", quirk_isapnp_mpu_resources}, 323 {"AZT0002", quirk_add_irq_optional_dependent_sets},
319 /* PnP resources that might overlap PCI BARs */ 324 /* PnP resources that might overlap PCI BARs */
320 {"PNP0c01", quirk_system_pci_resources}, 325 {"PNP0c01", quirk_system_pci_resources},
321 {"PNP0c02", quirk_system_pci_resources}, 326 {"PNP0c02", quirk_system_pci_resources},
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index 390b50096e30..4cfe3a1efdfb 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -3,6 +3,8 @@
3 * 3 *
4 * based on isapnp.c resource management (c) Jaroslav Kysela <perex@perex.cz> 4 * based on isapnp.c resource management (c) Jaroslav Kysela <perex@perex.cz>
5 * Copyright 2003 Adam Belay <ambx1@neo.rr.com> 5 * Copyright 2003 Adam Belay <ambx1@neo.rr.com>
6 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
7 * Bjorn Helgaas <bjorn.helgaas@hp.com>
6 */ 8 */
7 9
8#include <linux/module.h> 10#include <linux/module.h>
@@ -28,201 +30,121 @@ static int pnp_reserve_mem[16] = {[0 ... 15] = -1 }; /* reserve (don't use) some
28 * option registration 30 * option registration
29 */ 31 */
30 32
31struct pnp_option *pnp_build_option(int priority) 33struct pnp_option *pnp_build_option(struct pnp_dev *dev, unsigned long type,
34 unsigned int option_flags)
32{ 35{
33 struct pnp_option *option = pnp_alloc(sizeof(struct pnp_option)); 36 struct pnp_option *option;
34 37
38 option = kzalloc(sizeof(struct pnp_option), GFP_KERNEL);
35 if (!option) 39 if (!option)
36 return NULL; 40 return NULL;
37 41
38 option->priority = priority & 0xff; 42 option->flags = option_flags;
39 /* make sure the priority is valid */ 43 option->type = type;
40 if (option->priority > PNP_RES_PRIORITY_FUNCTIONAL)
41 option->priority = PNP_RES_PRIORITY_INVALID;
42
43 return option;
44}
45
46struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev)
47{
48 struct pnp_option *option;
49
50 option = pnp_build_option(PNP_RES_PRIORITY_PREFERRED);
51
52 /* this should never happen but if it does we'll try to continue */
53 if (dev->independent)
54 dev_err(&dev->dev, "independent resource already registered\n");
55 dev->independent = option;
56 44
57 dev_dbg(&dev->dev, "new independent option\n"); 45 list_add_tail(&option->list, &dev->options);
58 return option; 46 return option;
59} 47}
60 48
61struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev, 49int pnp_register_irq_resource(struct pnp_dev *dev, unsigned int option_flags,
62 int priority) 50 pnp_irq_mask_t *map, unsigned char flags)
63{ 51{
64 struct pnp_option *option; 52 struct pnp_option *option;
53 struct pnp_irq *irq;
65 54
66 option = pnp_build_option(priority); 55 option = pnp_build_option(dev, IORESOURCE_IRQ, option_flags);
67 56 if (!option)
68 if (dev->dependent) { 57 return -ENOMEM;
69 struct pnp_option *parent = dev->dependent;
70 while (parent->next)
71 parent = parent->next;
72 parent->next = option;
73 } else
74 dev->dependent = option;
75
76 dev_dbg(&dev->dev, "new dependent option (priority %#x)\n", priority);
77 return option;
78}
79
80int pnp_register_irq_resource(struct pnp_dev *dev, struct pnp_option *option,
81 struct pnp_irq *data)
82{
83 struct pnp_irq *ptr;
84#ifdef DEBUG
85 char buf[PNP_IRQ_NR]; /* hex-encoded, so this is overkill but safe */
86#endif
87 58
88 ptr = option->irq; 59 irq = &option->u.irq;
89 while (ptr && ptr->next) 60 irq->map = *map;
90 ptr = ptr->next; 61 irq->flags = flags;
91 if (ptr)
92 ptr->next = data;
93 else
94 option->irq = data;
95 62
96#ifdef CONFIG_PCI 63#ifdef CONFIG_PCI
97 { 64 {
98 int i; 65 int i;
99 66
100 for (i = 0; i < 16; i++) 67 for (i = 0; i < 16; i++)
101 if (test_bit(i, data->map)) 68 if (test_bit(i, irq->map.bits))
102 pcibios_penalize_isa_irq(i, 0); 69 pcibios_penalize_isa_irq(i, 0);
103 } 70 }
104#endif 71#endif
105 72
106#ifdef DEBUG 73 dbg_pnp_show_option(dev, option);
107 bitmap_scnprintf(buf, sizeof(buf), data->map, PNP_IRQ_NR);
108 dev_dbg(&dev->dev, " irq bitmask %s flags %#x\n", buf,
109 data->flags);
110#endif
111 return 0; 74 return 0;
112} 75}
113 76
114int pnp_register_dma_resource(struct pnp_dev *dev, struct pnp_option *option, 77int pnp_register_dma_resource(struct pnp_dev *dev, unsigned int option_flags,
115 struct pnp_dma *data) 78 unsigned char map, unsigned char flags)
116{ 79{
117 struct pnp_dma *ptr; 80 struct pnp_option *option;
118 81 struct pnp_dma *dma;
119 ptr = option->dma;
120 while (ptr && ptr->next)
121 ptr = ptr->next;
122 if (ptr)
123 ptr->next = data;
124 else
125 option->dma = data;
126
127 dev_dbg(&dev->dev, " dma bitmask %#x flags %#x\n", data->map,
128 data->flags);
129 return 0;
130}
131 82
132int pnp_register_port_resource(struct pnp_dev *dev, struct pnp_option *option, 83 option = pnp_build_option(dev, IORESOURCE_DMA, option_flags);
133 struct pnp_port *data) 84 if (!option)
134{ 85 return -ENOMEM;
135 struct pnp_port *ptr;
136
137 ptr = option->port;
138 while (ptr && ptr->next)
139 ptr = ptr->next;
140 if (ptr)
141 ptr->next = data;
142 else
143 option->port = data;
144
145 dev_dbg(&dev->dev, " io "
146 "min %#x max %#x align %d size %d flags %#x\n",
147 data->min, data->max, data->align, data->size, data->flags);
148 return 0;
149}
150 86
151int pnp_register_mem_resource(struct pnp_dev *dev, struct pnp_option *option, 87 dma = &option->u.dma;
152 struct pnp_mem *data) 88 dma->map = map;
153{ 89 dma->flags = flags;
154 struct pnp_mem *ptr; 90
155 91 dbg_pnp_show_option(dev, option);
156 ptr = option->mem;
157 while (ptr && ptr->next)
158 ptr = ptr->next;
159 if (ptr)
160 ptr->next = data;
161 else
162 option->mem = data;
163
164 dev_dbg(&dev->dev, " mem "
165 "min %#x max %#x align %d size %d flags %#x\n",
166 data->min, data->max, data->align, data->size, data->flags);
167 return 0; 92 return 0;
168} 93}
169 94
170static void pnp_free_port(struct pnp_port *port) 95int pnp_register_port_resource(struct pnp_dev *dev, unsigned int option_flags,
96 resource_size_t min, resource_size_t max,
97 resource_size_t align, resource_size_t size,
98 unsigned char flags)
171{ 99{
172 struct pnp_port *next; 100 struct pnp_option *option;
101 struct pnp_port *port;
173 102
174 while (port) { 103 option = pnp_build_option(dev, IORESOURCE_IO, option_flags);
175 next = port->next; 104 if (!option)
176 kfree(port); 105 return -ENOMEM;
177 port = next;
178 }
179}
180 106
181static void pnp_free_irq(struct pnp_irq *irq) 107 port = &option->u.port;
182{ 108 port->min = min;
183 struct pnp_irq *next; 109 port->max = max;
110 port->align = align;
111 port->size = size;
112 port->flags = flags;
184 113
185 while (irq) { 114 dbg_pnp_show_option(dev, option);
186 next = irq->next; 115 return 0;
187 kfree(irq);
188 irq = next;
189 }
190} 116}
191 117
192static void pnp_free_dma(struct pnp_dma *dma) 118int pnp_register_mem_resource(struct pnp_dev *dev, unsigned int option_flags,
119 resource_size_t min, resource_size_t max,
120 resource_size_t align, resource_size_t size,
121 unsigned char flags)
193{ 122{
194 struct pnp_dma *next; 123 struct pnp_option *option;
124 struct pnp_mem *mem;
195 125
196 while (dma) { 126 option = pnp_build_option(dev, IORESOURCE_MEM, option_flags);
197 next = dma->next; 127 if (!option)
198 kfree(dma); 128 return -ENOMEM;
199 dma = next;
200 }
201}
202 129
203static void pnp_free_mem(struct pnp_mem *mem) 130 mem = &option->u.mem;
204{ 131 mem->min = min;
205 struct pnp_mem *next; 132 mem->max = max;
133 mem->align = align;
134 mem->size = size;
135 mem->flags = flags;
206 136
207 while (mem) { 137 dbg_pnp_show_option(dev, option);
208 next = mem->next; 138 return 0;
209 kfree(mem);
210 mem = next;
211 }
212} 139}
213 140
214void pnp_free_option(struct pnp_option *option) 141void pnp_free_options(struct pnp_dev *dev)
215{ 142{
216 struct pnp_option *next; 143 struct pnp_option *option, *tmp;
217 144
218 while (option) { 145 list_for_each_entry_safe(option, tmp, &dev->options, list) {
219 next = option->next; 146 list_del(&option->list);
220 pnp_free_port(option->port);
221 pnp_free_irq(option->irq);
222 pnp_free_dma(option->dma);
223 pnp_free_mem(option->mem);
224 kfree(option); 147 kfree(option);
225 option = next;
226 } 148 }
227} 149}
228 150
@@ -237,7 +159,7 @@ void pnp_free_option(struct pnp_option *option)
237 !((*(enda) < *(startb)) || (*(endb) < *(starta))) 159 !((*(enda) < *(startb)) || (*(endb) < *(starta)))
238 160
239#define cannot_compare(flags) \ 161#define cannot_compare(flags) \
240((flags) & (IORESOURCE_UNSET | IORESOURCE_DISABLED)) 162((flags) & IORESOURCE_DISABLED)
241 163
242int pnp_check_port(struct pnp_dev *dev, struct resource *res) 164int pnp_check_port(struct pnp_dev *dev, struct resource *res)
243{ 165{
@@ -364,6 +286,61 @@ static irqreturn_t pnp_test_handler(int irq, void *dev_id)
364 return IRQ_HANDLED; 286 return IRQ_HANDLED;
365} 287}
366 288
289#ifdef CONFIG_PCI
290static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci,
291 unsigned int irq)
292{
293 u32 class;
294 u8 progif;
295
296 if (pci->irq == irq) {
297 dev_dbg(&pnp->dev, "device %s using irq %d\n",
298 pci_name(pci), irq);
299 return 1;
300 }
301
302 /*
303 * See pci_setup_device() and ata_pci_sff_activate_host() for
304 * similar IDE legacy detection.
305 */
306 pci_read_config_dword(pci, PCI_CLASS_REVISION, &class);
307 class >>= 8; /* discard revision ID */
308 progif = class & 0xff;
309 class >>= 8;
310
311 if (class == PCI_CLASS_STORAGE_IDE) {
312 /*
313 * Unless both channels are native-PCI mode only,
314 * treat the compatibility IRQs as busy.
315 */
316 if ((progif & 0x5) != 0x5)
317 if (pci_get_legacy_ide_irq(pci, 0) == irq ||
318 pci_get_legacy_ide_irq(pci, 1) == irq) {
319 dev_dbg(&pnp->dev, "legacy IDE device %s "
320 "using irq %d\n", pci_name(pci), irq);
321 return 1;
322 }
323 }
324
325 return 0;
326}
327#endif
328
329static int pci_uses_irq(struct pnp_dev *pnp, unsigned int irq)
330{
331#ifdef CONFIG_PCI
332 struct pci_dev *pci = NULL;
333
334 for_each_pci_dev(pci) {
335 if (pci_dev_uses_irq(pnp, pci, irq)) {
336 pci_dev_put(pci);
337 return 1;
338 }
339 }
340#endif
341 return 0;
342}
343
367int pnp_check_irq(struct pnp_dev *dev, struct resource *res) 344int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
368{ 345{
369 int i; 346 int i;
@@ -395,18 +372,9 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
395 } 372 }
396 } 373 }
397 374
398#ifdef CONFIG_PCI
399 /* check if the resource is being used by a pci device */ 375 /* check if the resource is being used by a pci device */
400 { 376 if (pci_uses_irq(dev, *irq))
401 struct pci_dev *pci = NULL; 377 return 0;
402 for_each_pci_dev(pci) {
403 if (pci->irq == *irq) {
404 pci_dev_put(pci);
405 return 0;
406 }
407 }
408 }
409#endif
410 378
411 /* check if the resource is already in use, skip if the 379 /* check if the resource is already in use, skip if the
412 * device is active because it itself may be in use */ 380 * device is active because it itself may be in use */
@@ -499,81 +467,37 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
499#endif 467#endif
500} 468}
501 469
502struct pnp_resource *pnp_get_pnp_resource(struct pnp_dev *dev, 470int pnp_resource_type(struct resource *res)
503 unsigned int type, unsigned int num)
504{ 471{
505 struct pnp_resource_table *res = dev->res; 472 return res->flags & (IORESOURCE_IO | IORESOURCE_MEM |
506 473 IORESOURCE_IRQ | IORESOURCE_DMA);
507 switch (type) {
508 case IORESOURCE_IO:
509 if (num >= PNP_MAX_PORT)
510 return NULL;
511 return &res->port[num];
512 case IORESOURCE_MEM:
513 if (num >= PNP_MAX_MEM)
514 return NULL;
515 return &res->mem[num];
516 case IORESOURCE_IRQ:
517 if (num >= PNP_MAX_IRQ)
518 return NULL;
519 return &res->irq[num];
520 case IORESOURCE_DMA:
521 if (num >= PNP_MAX_DMA)
522 return NULL;
523 return &res->dma[num];
524 }
525 return NULL;
526} 474}
527 475
528struct resource *pnp_get_resource(struct pnp_dev *dev, 476struct resource *pnp_get_resource(struct pnp_dev *dev,
529 unsigned int type, unsigned int num) 477 unsigned int type, unsigned int num)
530{ 478{
531 struct pnp_resource *pnp_res; 479 struct pnp_resource *pnp_res;
480 struct resource *res;
532 481
533 pnp_res = pnp_get_pnp_resource(dev, type, num); 482 list_for_each_entry(pnp_res, &dev->resources, list) {
534 if (pnp_res) 483 res = &pnp_res->res;
535 return &pnp_res->res; 484 if (pnp_resource_type(res) == type && num-- == 0)
536 485 return res;
486 }
537 return NULL; 487 return NULL;
538} 488}
539EXPORT_SYMBOL(pnp_get_resource); 489EXPORT_SYMBOL(pnp_get_resource);
540 490
541static struct pnp_resource *pnp_new_resource(struct pnp_dev *dev, int type) 491static struct pnp_resource *pnp_new_resource(struct pnp_dev *dev)
542{ 492{
543 struct pnp_resource *pnp_res; 493 struct pnp_resource *pnp_res;
544 int i;
545 494
546 switch (type) { 495 pnp_res = kzalloc(sizeof(struct pnp_resource), GFP_KERNEL);
547 case IORESOURCE_IO: 496 if (!pnp_res)
548 for (i = 0; i < PNP_MAX_PORT; i++) { 497 return NULL;
549 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, i); 498
550 if (pnp_res && !pnp_resource_valid(&pnp_res->res)) 499 list_add_tail(&pnp_res->list, &dev->resources);
551 return pnp_res; 500 return pnp_res;
552 }
553 break;
554 case IORESOURCE_MEM:
555 for (i = 0; i < PNP_MAX_MEM; i++) {
556 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, i);
557 if (pnp_res && !pnp_resource_valid(&pnp_res->res))
558 return pnp_res;
559 }
560 break;
561 case IORESOURCE_IRQ:
562 for (i = 0; i < PNP_MAX_IRQ; i++) {
563 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, i);
564 if (pnp_res && !pnp_resource_valid(&pnp_res->res))
565 return pnp_res;
566 }
567 break;
568 case IORESOURCE_DMA:
569 for (i = 0; i < PNP_MAX_DMA; i++) {
570 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, i);
571 if (pnp_res && !pnp_resource_valid(&pnp_res->res))
572 return pnp_res;
573 }
574 break;
575 }
576 return NULL;
577} 501}
578 502
579struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, 503struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
@@ -581,15 +505,10 @@ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
581{ 505{
582 struct pnp_resource *pnp_res; 506 struct pnp_resource *pnp_res;
583 struct resource *res; 507 struct resource *res;
584 static unsigned char warned;
585 508
586 pnp_res = pnp_new_resource(dev, IORESOURCE_IRQ); 509 pnp_res = pnp_new_resource(dev);
587 if (!pnp_res) { 510 if (!pnp_res) {
588 if (!warned) { 511 dev_err(&dev->dev, "can't add resource for IRQ %d\n", irq);
589 dev_err(&dev->dev, "can't add resource for IRQ %d\n",
590 irq);
591 warned = 1;
592 }
593 return NULL; 512 return NULL;
594 } 513 }
595 514
@@ -607,15 +526,10 @@ struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
607{ 526{
608 struct pnp_resource *pnp_res; 527 struct pnp_resource *pnp_res;
609 struct resource *res; 528 struct resource *res;
610 static unsigned char warned;
611 529
612 pnp_res = pnp_new_resource(dev, IORESOURCE_DMA); 530 pnp_res = pnp_new_resource(dev);
613 if (!pnp_res) { 531 if (!pnp_res) {
614 if (!warned) { 532 dev_err(&dev->dev, "can't add resource for DMA %d\n", dma);
615 dev_err(&dev->dev, "can't add resource for DMA %d\n",
616 dma);
617 warned = 1;
618 }
619 return NULL; 533 return NULL;
620 } 534 }
621 535
@@ -634,16 +548,12 @@ struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
634{ 548{
635 struct pnp_resource *pnp_res; 549 struct pnp_resource *pnp_res;
636 struct resource *res; 550 struct resource *res;
637 static unsigned char warned;
638 551
639 pnp_res = pnp_new_resource(dev, IORESOURCE_IO); 552 pnp_res = pnp_new_resource(dev);
640 if (!pnp_res) { 553 if (!pnp_res) {
641 if (!warned) { 554 dev_err(&dev->dev, "can't add resource for IO %#llx-%#llx\n",
642 dev_err(&dev->dev, "can't add resource for IO " 555 (unsigned long long) start,
643 "%#llx-%#llx\n",(unsigned long long) start, 556 (unsigned long long) end);
644 (unsigned long long) end);
645 warned = 1;
646 }
647 return NULL; 557 return NULL;
648 } 558 }
649 559
@@ -663,16 +573,12 @@ struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
663{ 573{
664 struct pnp_resource *pnp_res; 574 struct pnp_resource *pnp_res;
665 struct resource *res; 575 struct resource *res;
666 static unsigned char warned;
667 576
668 pnp_res = pnp_new_resource(dev, IORESOURCE_MEM); 577 pnp_res = pnp_new_resource(dev);
669 if (!pnp_res) { 578 if (!pnp_res) {
670 if (!warned) { 579 dev_err(&dev->dev, "can't add resource for MEM %#llx-%#llx\n",
671 dev_err(&dev->dev, "can't add resource for MEM " 580 (unsigned long long) start,
672 "%#llx-%#llx\n",(unsigned long long) start, 581 (unsigned long long) end);
673 (unsigned long long) end);
674 warned = 1;
675 }
676 return NULL; 582 return NULL;
677 } 583 }
678 584
@@ -686,6 +592,52 @@ struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
686 return pnp_res; 592 return pnp_res;
687} 593}
688 594
595/*
596 * Determine whether the specified resource is a possible configuration
597 * for this device.
598 */
599int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t start,
600 resource_size_t size)
601{
602 struct pnp_option *option;
603 struct pnp_port *port;
604 struct pnp_mem *mem;
605 struct pnp_irq *irq;
606 struct pnp_dma *dma;
607
608 list_for_each_entry(option, &dev->options, list) {
609 if (option->type != type)
610 continue;
611
612 switch (option->type) {
613 case IORESOURCE_IO:
614 port = &option->u.port;
615 if (port->min == start && port->size == size)
616 return 1;
617 break;
618 case IORESOURCE_MEM:
619 mem = &option->u.mem;
620 if (mem->min == start && mem->size == size)
621 return 1;
622 break;
623 case IORESOURCE_IRQ:
624 irq = &option->u.irq;
625 if (start < PNP_IRQ_NR &&
626 test_bit(start, irq->map.bits))
627 return 1;
628 break;
629 case IORESOURCE_DMA:
630 dma = &option->u.dma;
631 if (dma->map & (1 << start))
632 return 1;
633 break;
634 }
635 }
636
637 return 0;
638}
639EXPORT_SYMBOL(pnp_possible_config);
640
689/* format is: pnp_reserve_irq=irq1[,irq2] .... */ 641/* format is: pnp_reserve_irq=irq1[,irq2] .... */
690static int __init pnp_setup_reserve_irq(char *str) 642static int __init pnp_setup_reserve_irq(char *str)
691{ 643{
diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c
index 95b076c18c07..bbf78ef4ba02 100644
--- a/drivers/pnp/support.c
+++ b/drivers/pnp/support.c
@@ -2,6 +2,8 @@
2 * support.c - standard functions for the use of pnp protocol drivers 2 * support.c - standard functions for the use of pnp protocol drivers
3 * 3 *
4 * Copyright 2003 Adam Belay <ambx1@neo.rr.com> 4 * Copyright 2003 Adam Belay <ambx1@neo.rr.com>
5 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
6 * Bjorn Helgaas <bjorn.helgaas@hp.com>
5 */ 7 */
6 8
7#include <linux/module.h> 9#include <linux/module.h>
@@ -16,6 +18,10 @@
16 */ 18 */
17int pnp_is_active(struct pnp_dev *dev) 19int pnp_is_active(struct pnp_dev *dev)
18{ 20{
21 /*
22 * I don't think this is very reliable because pnp_disable_dev()
23 * only clears out auto-assigned resources.
24 */
19 if (!pnp_port_start(dev, 0) && pnp_port_len(dev, 0) <= 1 && 25 if (!pnp_port_start(dev, 0) && pnp_port_len(dev, 0) <= 1 &&
20 !pnp_mem_start(dev, 0) && pnp_mem_len(dev, 0) <= 1 && 26 !pnp_mem_start(dev, 0) && pnp_mem_len(dev, 0) <= 1 &&
21 pnp_irq(dev, 0) == -1 && pnp_dma(dev, 0) == -1) 27 pnp_irq(dev, 0) == -1 && pnp_dma(dev, 0) == -1)
@@ -52,39 +58,154 @@ void pnp_eisa_id_to_string(u32 id, char *str)
52 str[7] = '\0'; 58 str[7] = '\0';
53} 59}
54 60
61char *pnp_resource_type_name(struct resource *res)
62{
63 switch (pnp_resource_type(res)) {
64 case IORESOURCE_IO:
65 return "io";
66 case IORESOURCE_MEM:
67 return "mem";
68 case IORESOURCE_IRQ:
69 return "irq";
70 case IORESOURCE_DMA:
71 return "dma";
72 }
73 return NULL;
74}
75
55void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc) 76void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
56{ 77{
57#ifdef DEBUG 78#ifdef DEBUG
79 char buf[128];
80 int len = 0;
81 struct pnp_resource *pnp_res;
58 struct resource *res; 82 struct resource *res;
59 int i;
60 83
61 dev_dbg(&dev->dev, "current resources: %s\n", desc); 84 if (list_empty(&dev->resources)) {
62 85 dev_dbg(&dev->dev, "%s: no current resources\n", desc);
63 for (i = 0; i < PNP_MAX_IRQ; i++) { 86 return;
64 res = pnp_get_resource(dev, IORESOURCE_IRQ, i);
65 if (res && !(res->flags & IORESOURCE_UNSET))
66 dev_dbg(&dev->dev, " irq %lld flags %#lx\n",
67 (unsigned long long) res->start, res->flags);
68 } 87 }
69 for (i = 0; i < PNP_MAX_DMA; i++) { 88
70 res = pnp_get_resource(dev, IORESOURCE_DMA, i); 89 dev_dbg(&dev->dev, "%s: current resources:\n", desc);
71 if (res && !(res->flags & IORESOURCE_UNSET)) 90 list_for_each_entry(pnp_res, &dev->resources, list) {
72 dev_dbg(&dev->dev, " dma %lld flags %#lx\n", 91 res = &pnp_res->res;
73 (unsigned long long) res->start, res->flags); 92
93 len += snprintf(buf + len, sizeof(buf) - len, " %-3s ",
94 pnp_resource_type_name(res));
95
96 if (res->flags & IORESOURCE_DISABLED) {
97 dev_dbg(&dev->dev, "%sdisabled\n", buf);
98 continue;
99 }
100
101 switch (pnp_resource_type(res)) {
102 case IORESOURCE_IO:
103 case IORESOURCE_MEM:
104 len += snprintf(buf + len, sizeof(buf) - len,
105 "%#llx-%#llx flags %#lx",
106 (unsigned long long) res->start,
107 (unsigned long long) res->end,
108 res->flags);
109 break;
110 case IORESOURCE_IRQ:
111 case IORESOURCE_DMA:
112 len += snprintf(buf + len, sizeof(buf) - len,
113 "%lld flags %#lx",
114 (unsigned long long) res->start,
115 res->flags);
116 break;
117 }
118 dev_dbg(&dev->dev, "%s\n", buf);
74 } 119 }
75 for (i = 0; i < PNP_MAX_PORT; i++) { 120#endif
76 res = pnp_get_resource(dev, IORESOURCE_IO, i); 121}
77 if (res && !(res->flags & IORESOURCE_UNSET)) 122
78 dev_dbg(&dev->dev, " io %#llx-%#llx flags %#lx\n", 123char *pnp_option_priority_name(struct pnp_option *option)
79 (unsigned long long) res->start, 124{
80 (unsigned long long) res->end, res->flags); 125 switch (pnp_option_priority(option)) {
126 case PNP_RES_PRIORITY_PREFERRED:
127 return "preferred";
128 case PNP_RES_PRIORITY_ACCEPTABLE:
129 return "acceptable";
130 case PNP_RES_PRIORITY_FUNCTIONAL:
131 return "functional";
81 } 132 }
82 for (i = 0; i < PNP_MAX_MEM; i++) { 133 return "invalid";
83 res = pnp_get_resource(dev, IORESOURCE_MEM, i); 134}
84 if (res && !(res->flags & IORESOURCE_UNSET)) 135
85 dev_dbg(&dev->dev, " mem %#llx-%#llx flags %#lx\n", 136void dbg_pnp_show_option(struct pnp_dev *dev, struct pnp_option *option)
86 (unsigned long long) res->start, 137{
87 (unsigned long long) res->end, res->flags); 138#ifdef DEBUG
139 char buf[128];
140 int len = 0, i;
141 struct pnp_port *port;
142 struct pnp_mem *mem;
143 struct pnp_irq *irq;
144 struct pnp_dma *dma;
145
146 if (pnp_option_is_dependent(option))
147 len += snprintf(buf + len, sizeof(buf) - len,
148 " dependent set %d (%s) ",
149 pnp_option_set(option),
150 pnp_option_priority_name(option));
151 else
152 len += snprintf(buf + len, sizeof(buf) - len, " independent ");
153
154 switch (option->type) {
155 case IORESOURCE_IO:
156 port = &option->u.port;
157 len += snprintf(buf + len, sizeof(buf) - len, "io min %#llx "
158 "max %#llx align %lld size %lld flags %#x",
159 (unsigned long long) port->min,
160 (unsigned long long) port->max,
161 (unsigned long long) port->align,
162 (unsigned long long) port->size, port->flags);
163 break;
164 case IORESOURCE_MEM:
165 mem = &option->u.mem;
166 len += snprintf(buf + len, sizeof(buf) - len, "mem min %#llx "
167 "max %#llx align %lld size %lld flags %#x",
168 (unsigned long long) mem->min,
169 (unsigned long long) mem->max,
170 (unsigned long long) mem->align,
171 (unsigned long long) mem->size, mem->flags);
172 break;
173 case IORESOURCE_IRQ:
174 irq = &option->u.irq;
175 len += snprintf(buf + len, sizeof(buf) - len, "irq");
176 if (bitmap_empty(irq->map.bits, PNP_IRQ_NR))
177 len += snprintf(buf + len, sizeof(buf) - len,
178 " <none>");
179 else {
180 for (i = 0; i < PNP_IRQ_NR; i++)
181 if (test_bit(i, irq->map.bits))
182 len += snprintf(buf + len,
183 sizeof(buf) - len,
184 " %d", i);
185 }
186 len += snprintf(buf + len, sizeof(buf) - len, " flags %#x",
187 irq->flags);
188 if (irq->flags & IORESOURCE_IRQ_OPTIONAL)
189 len += snprintf(buf + len, sizeof(buf) - len,
190 " (optional)");
191 break;
192 case IORESOURCE_DMA:
193 dma = &option->u.dma;
194 len += snprintf(buf + len, sizeof(buf) - len, "dma");
195 if (!dma->map)
196 len += snprintf(buf + len, sizeof(buf) - len,
197 " <none>");
198 else {
199 for (i = 0; i < 8; i++)
200 if (dma->map & (1 << i))
201 len += snprintf(buf + len,
202 sizeof(buf) - len,
203 " %d", i);
204 }
205 len += snprintf(buf + len, sizeof(buf) - len, " (bitmask %#x) "
206 "flags %#x", dma->map, dma->flags);
207 break;
88 } 208 }
209 dev_dbg(&dev->dev, "%s\n", buf);
89#endif 210#endif
90} 211}
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index cf4e07b01d48..764f3a310685 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -60,7 +60,7 @@ static void reserve_resources_of_dev(struct pnp_dev *dev)
60 int i; 60 int i;
61 61
62 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) { 62 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) {
63 if (res->flags & IORESOURCE_UNSET) 63 if (res->flags & IORESOURCE_DISABLED)
64 continue; 64 continue;
65 if (res->start == 0) 65 if (res->start == 0)
66 continue; /* disabled */ 66 continue; /* disabled */
@@ -81,7 +81,7 @@ static void reserve_resources_of_dev(struct pnp_dev *dev)
81 } 81 }
82 82
83 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) { 83 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) {
84 if (res->flags & (IORESOURCE_UNSET | IORESOURCE_DISABLED)) 84 if (res->flags & IORESOURCE_DISABLED)
85 continue; 85 continue;
86 86
87 reserve_range(dev, res->start, res->end, 0); 87 reserve_range(dev, res->start, res->end, 0);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index d91df38ee4f7..85fcb4371054 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -333,7 +333,8 @@ dasd_diag_check_device(struct dasd_device *device)
333 if (IS_ERR(block)) { 333 if (IS_ERR(block)) {
334 DEV_MESSAGE(KERN_WARNING, device, "%s", 334 DEV_MESSAGE(KERN_WARNING, device, "%s",
335 "could not allocate dasd block structure"); 335 "could not allocate dasd block structure");
336 kfree(device->private); 336 device->private = NULL;
337 kfree(private);
337 return PTR_ERR(block); 338 return PTR_ERR(block);
338 } 339 }
339 device->block = block; 340 device->block = block;
@@ -348,7 +349,8 @@ dasd_diag_check_device(struct dasd_device *device)
348 if (rc) { 349 if (rc) {
349 DEV_MESSAGE(KERN_WARNING, device, "failed to retrieve device " 350 DEV_MESSAGE(KERN_WARNING, device, "failed to retrieve device "
350 "information (rc=%d)", rc); 351 "information (rc=%d)", rc);
351 return -ENOTSUPP; 352 rc = -EOPNOTSUPP;
353 goto out;
352 } 354 }
353 355
354 /* Figure out position of label block */ 356 /* Figure out position of label block */
@@ -362,7 +364,8 @@ dasd_diag_check_device(struct dasd_device *device)
362 default: 364 default:
363 DEV_MESSAGE(KERN_WARNING, device, "unsupported device class " 365 DEV_MESSAGE(KERN_WARNING, device, "unsupported device class "
364 "(class=%d)", private->rdc_data.vdev_class); 366 "(class=%d)", private->rdc_data.vdev_class);
365 return -ENOTSUPP; 367 rc = -EOPNOTSUPP;
368 goto out;
366 } 369 }
367 370
368 DBF_DEV_EVENT(DBF_INFO, device, 371 DBF_DEV_EVENT(DBF_INFO, device,
@@ -379,7 +382,8 @@ dasd_diag_check_device(struct dasd_device *device)
379 if (label == NULL) { 382 if (label == NULL) {
380 DEV_MESSAGE(KERN_WARNING, device, "%s", 383 DEV_MESSAGE(KERN_WARNING, device, "%s",
381 "No memory to allocate initialization request"); 384 "No memory to allocate initialization request");
382 return -ENOMEM; 385 rc = -ENOMEM;
386 goto out;
383 } 387 }
384 rc = 0; 388 rc = 0;
385 end_block = 0; 389 end_block = 0;
@@ -403,7 +407,7 @@ dasd_diag_check_device(struct dasd_device *device)
403 DEV_MESSAGE(KERN_WARNING, device, "%s", 407 DEV_MESSAGE(KERN_WARNING, device, "%s",
404 "DIAG call failed"); 408 "DIAG call failed");
405 rc = -EOPNOTSUPP; 409 rc = -EOPNOTSUPP;
406 goto out; 410 goto out_label;
407 } 411 }
408 mdsk_term_io(device); 412 mdsk_term_io(device);
409 if (rc == 0) 413 if (rc == 0)
@@ -413,7 +417,7 @@ dasd_diag_check_device(struct dasd_device *device)
413 DEV_MESSAGE(KERN_WARNING, device, "device access failed " 417 DEV_MESSAGE(KERN_WARNING, device, "device access failed "
414 "(rc=%d)", rc); 418 "(rc=%d)", rc);
415 rc = -EIO; 419 rc = -EIO;
416 goto out; 420 goto out_label;
417 } 421 }
418 /* check for label block */ 422 /* check for label block */
419 if (memcmp(label->label_id, DASD_DIAG_CMS1, 423 if (memcmp(label->label_id, DASD_DIAG_CMS1,
@@ -439,8 +443,15 @@ dasd_diag_check_device(struct dasd_device *device)
439 (unsigned long) (block->blocks << 443 (unsigned long) (block->blocks <<
440 block->s2b_shift) >> 1); 444 block->s2b_shift) >> 1);
441 } 445 }
442out: 446out_label:
443 free_page((long) label); 447 free_page((long) label);
448out:
449 if (rc) {
450 device->block = NULL;
451 dasd_free_block(block);
452 device->private = NULL;
453 kfree(private);
454 }
444 return rc; 455 return rc;
445} 456}
446 457
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index e0b77210d37a..3590fdb5b2fd 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1418,8 +1418,10 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1418 1418
1419 1419
1420 /* service information message SIM */ 1420 /* service information message SIM */
1421 if ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE) { 1421 if (irb->esw.esw0.erw.cons && (irb->ecw[27] & DASD_SENSE_BIT_0) &&
1422 ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
1422 dasd_3990_erp_handle_sim(device, irb->ecw); 1423 dasd_3990_erp_handle_sim(device, irb->ecw);
1424 dasd_schedule_device_bh(device);
1423 return; 1425 return;
1424 } 1426 }
1425 1427
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index aee4656127f7..aa0c533423a5 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -139,7 +139,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
139 if (IS_ERR(block)) { 139 if (IS_ERR(block)) {
140 DEV_MESSAGE(KERN_WARNING, device, "%s", 140 DEV_MESSAGE(KERN_WARNING, device, "%s",
141 "could not allocate dasd block structure"); 141 "could not allocate dasd block structure");
142 kfree(device->private); 142 device->private = NULL;
143 kfree(private);
143 return PTR_ERR(block); 144 return PTR_ERR(block);
144 } 145 }
145 device->block = block; 146 device->block = block;
@@ -152,6 +153,10 @@ dasd_fba_check_characteristics(struct dasd_device *device)
152 DEV_MESSAGE(KERN_WARNING, device, 153 DEV_MESSAGE(KERN_WARNING, device,
153 "Read device characteristics returned error %d", 154 "Read device characteristics returned error %d",
154 rc); 155 rc);
156 device->block = NULL;
157 dasd_free_block(block);
158 device->private = NULL;
159 kfree(private);
155 return rc; 160 return rc;
156 } 161 }
157 162
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 0a9f1cccbe58..b0ac44b27127 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -345,7 +345,7 @@ static int get_urd_class(struct urdev *urd)
345 cc = diag210(&ur_diag210); 345 cc = diag210(&ur_diag210);
346 switch (cc) { 346 switch (cc) {
347 case 0: 347 case 0:
348 return -ENOTSUPP; 348 return -EOPNOTSUPP;
349 case 2: 349 case 2:
350 return ur_diag210.vrdcvcla; /* virtual device class */ 350 return ur_diag210.vrdcvcla; /* virtual device class */
351 case 3: 351 case 3:
@@ -621,7 +621,7 @@ static int verify_device(struct urdev *urd)
621 case DEV_CLASS_UR_I: 621 case DEV_CLASS_UR_I:
622 return verify_uri_device(urd); 622 return verify_uri_device(urd);
623 default: 623 default:
624 return -ENOTSUPP; 624 return -EOPNOTSUPP;
625 } 625 }
626} 626}
627 627
@@ -654,7 +654,7 @@ static int get_file_reclen(struct urdev *urd)
654 case DEV_CLASS_UR_I: 654 case DEV_CLASS_UR_I:
655 return get_uri_file_reclen(urd); 655 return get_uri_file_reclen(urd);
656 default: 656 default:
657 return -ENOTSUPP; 657 return -EOPNOTSUPP;
658 } 658 }
659} 659}
660 660
@@ -827,7 +827,7 @@ static int ur_probe(struct ccw_device *cdev)
827 goto fail_remove_attr; 827 goto fail_remove_attr;
828 } 828 }
829 if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) { 829 if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
830 rc = -ENOTSUPP; 830 rc = -EOPNOTSUPP;
831 goto fail_remove_attr; 831 goto fail_remove_attr;
832 } 832 }
833 spin_lock_irq(get_ccwdev_lock(cdev)); 833 spin_lock_irq(get_ccwdev_lock(cdev));
@@ -892,7 +892,7 @@ static int ur_set_online(struct ccw_device *cdev)
892 } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) { 892 } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
893 sprintf(node_id, "vmprt-%s", cdev->dev.bus_id); 893 sprintf(node_id, "vmprt-%s", cdev->dev.bus_id);
894 } else { 894 } else {
895 rc = -ENOTSUPP; 895 rc = -EOPNOTSUPP;
896 goto fail_free_cdev; 896 goto fail_free_cdev;
897 } 897 }
898 898
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 047dd92ae804..7fd84be11931 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -29,6 +29,7 @@
29 29
30#define TO_USER 0 30#define TO_USER 0
31#define TO_KERNEL 1 31#define TO_KERNEL 1
32#define CHUNK_INFO_SIZE 34 /* 2 16-byte char, each followed by blank */
32 33
33enum arch_id { 34enum arch_id {
34 ARCH_S390 = 0, 35 ARCH_S390 = 0,
@@ -51,6 +52,7 @@ static struct debug_info *zcore_dbf;
51static int hsa_available; 52static int hsa_available;
52static struct dentry *zcore_dir; 53static struct dentry *zcore_dir;
53static struct dentry *zcore_file; 54static struct dentry *zcore_file;
55static struct dentry *zcore_memmap_file;
54 56
55/* 57/*
56 * Copy memory from HSA to kernel or user memory (not reentrant): 58 * Copy memory from HSA to kernel or user memory (not reentrant):
@@ -476,6 +478,54 @@ static const struct file_operations zcore_fops = {
476 .release = zcore_release, 478 .release = zcore_release,
477}; 479};
478 480
481static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
482 size_t count, loff_t *ppos)
483{
484 return simple_read_from_buffer(buf, count, ppos, filp->private_data,
485 MEMORY_CHUNKS * CHUNK_INFO_SIZE);
486}
487
488static int zcore_memmap_open(struct inode *inode, struct file *filp)
489{
490 int i;
491 char *buf;
492 struct mem_chunk *chunk_array;
493
494 chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
495 GFP_KERNEL);
496 if (!chunk_array)
497 return -ENOMEM;
498 detect_memory_layout(chunk_array);
499 buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL);
500 if (!buf) {
501 kfree(chunk_array);
502 return -ENOMEM;
503 }
504 for (i = 0; i < MEMORY_CHUNKS; i++) {
505 sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ",
506 (unsigned long long) chunk_array[i].addr,
507 (unsigned long long) chunk_array[i].size);
508 if (chunk_array[i].size == 0)
509 break;
510 }
511 kfree(chunk_array);
512 filp->private_data = buf;
513 return 0;
514}
515
516static int zcore_memmap_release(struct inode *inode, struct file *filp)
517{
518 kfree(filp->private_data);
519 return 0;
520}
521
522static const struct file_operations zcore_memmap_fops = {
523 .owner = THIS_MODULE,
524 .read = zcore_memmap_read,
525 .open = zcore_memmap_open,
526 .release = zcore_memmap_release,
527};
528
479 529
480static void __init set_s390_lc_mask(union save_area *map) 530static void __init set_s390_lc_mask(union save_area *map)
481{ 531{
@@ -554,18 +604,44 @@ static int __init check_sdias(void)
554 return 0; 604 return 0;
555} 605}
556 606
557static void __init zcore_header_init(int arch, struct zcore_header *hdr) 607static int __init get_mem_size(unsigned long *mem)
608{
609 int i;
610 struct mem_chunk *chunk_array;
611
612 chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
613 GFP_KERNEL);
614 if (!chunk_array)
615 return -ENOMEM;
616 detect_memory_layout(chunk_array);
617 for (i = 0; i < MEMORY_CHUNKS; i++) {
618 if (chunk_array[i].size == 0)
619 break;
620 *mem += chunk_array[i].size;
621 }
622 kfree(chunk_array);
623 return 0;
624}
625
626static int __init zcore_header_init(int arch, struct zcore_header *hdr)
558{ 627{
628 int rc;
629 unsigned long memory = 0;
630
559 if (arch == ARCH_S390X) 631 if (arch == ARCH_S390X)
560 hdr->arch_id = DUMP_ARCH_S390X; 632 hdr->arch_id = DUMP_ARCH_S390X;
561 else 633 else
562 hdr->arch_id = DUMP_ARCH_S390; 634 hdr->arch_id = DUMP_ARCH_S390;
563 hdr->mem_size = sys_info.mem_size; 635 rc = get_mem_size(&memory);
564 hdr->rmem_size = sys_info.mem_size; 636 if (rc)
637 return rc;
638 hdr->mem_size = memory;
639 hdr->rmem_size = memory;
565 hdr->mem_end = sys_info.mem_size; 640 hdr->mem_end = sys_info.mem_size;
566 hdr->num_pages = sys_info.mem_size / PAGE_SIZE; 641 hdr->num_pages = memory / PAGE_SIZE;
567 hdr->tod = get_clock(); 642 hdr->tod = get_clock();
568 get_cpu_id(&hdr->cpu_id); 643 get_cpu_id(&hdr->cpu_id);
644 return 0;
569} 645}
570 646
571static int __init zcore_init(void) 647static int __init zcore_init(void)
@@ -608,7 +684,9 @@ static int __init zcore_init(void)
608 if (rc) 684 if (rc)
609 goto fail; 685 goto fail;
610 686
611 zcore_header_init(arch, &zcore_header); 687 rc = zcore_header_init(arch, &zcore_header);
688 if (rc)
689 goto fail;
612 690
613 zcore_dir = debugfs_create_dir("zcore" , NULL); 691 zcore_dir = debugfs_create_dir("zcore" , NULL);
614 if (!zcore_dir) { 692 if (!zcore_dir) {
@@ -618,13 +696,22 @@ static int __init zcore_init(void)
618 zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL, 696 zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
619 &zcore_fops); 697 &zcore_fops);
620 if (!zcore_file) { 698 if (!zcore_file) {
621 debugfs_remove(zcore_dir);
622 rc = -ENOMEM; 699 rc = -ENOMEM;
623 goto fail; 700 goto fail_dir;
701 }
702 zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
703 NULL, &zcore_memmap_fops);
704 if (!zcore_memmap_file) {
705 rc = -ENOMEM;
706 goto fail_file;
624 } 707 }
625 hsa_available = 1; 708 hsa_available = 1;
626 return 0; 709 return 0;
627 710
711fail_file:
712 debugfs_remove(zcore_file);
713fail_dir:
714 debugfs_remove(zcore_dir);
628fail: 715fail:
629 diag308(DIAG308_REL_HSA, NULL); 716 diag308(DIAG308_REL_HSA, NULL);
630 return rc; 717 return rc;
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index 91e9e3f3073a..bd79bd165396 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -9,4 +9,6 @@ ccw_device-objs += device_id.o device_pgid.o device_status.o
9obj-y += ccw_device.o cmf.o 9obj-y += ccw_device.o cmf.o
10obj-$(CONFIG_CHSC_SCH) += chsc_sch.o 10obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
11obj-$(CONFIG_CCWGROUP) += ccwgroup.o 11obj-$(CONFIG_CCWGROUP) += ccwgroup.o
12
13qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o
12obj-$(CONFIG_QDIO) += qdio.o 14obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 65264a38057d..29826fdd47b8 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -27,7 +27,13 @@
27 27
28static void *sei_page; 28static void *sei_page;
29 29
30static int chsc_error_from_response(int response) 30/**
31 * chsc_error_from_response() - convert a chsc response to an error
32 * @response: chsc response code
33 *
34 * Returns an appropriate Linux error code for @response.
35 */
36int chsc_error_from_response(int response)
31{ 37{
32 switch (response) { 38 switch (response) {
33 case 0x0001: 39 case 0x0001:
@@ -45,6 +51,7 @@ static int chsc_error_from_response(int response)
45 return -EIO; 51 return -EIO;
46 } 52 }
47} 53}
54EXPORT_SYMBOL_GPL(chsc_error_from_response);
48 55
49struct chsc_ssd_area { 56struct chsc_ssd_area {
50 struct chsc_header request; 57 struct chsc_header request;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index fb6c4d6c45b4..ba59bceace98 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -101,4 +101,6 @@ void chsc_chp_online(struct chp_id chpid);
101void chsc_chp_offline(struct chp_id chpid); 101void chsc_chp_offline(struct chp_id chpid);
102int chsc_get_channel_measurement_chars(struct channel_path *chp); 102int chsc_get_channel_measurement_chars(struct channel_path *chp);
103 103
104int chsc_error_from_response(int response);
105
104#endif 106#endif
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
deleted file mode 100644
index 2bf36e14b102..000000000000
--- a/drivers/s390/cio/qdio.c
+++ /dev/null
@@ -1,3929 +0,0 @@
1/*
2 *
3 * linux/drivers/s390/cio/qdio.c
4 *
5 * Linux for S/390 QDIO base support, Hipersocket base support
6 * version 2
7 *
8 * Copyright 2000,2002 IBM Corporation
9 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
10 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
11 *
12 * Restriction: only 63 iqdio subchannels would have its own indicator,
13 * after that, subsequent subchannels share one indicator
14 *
15 *
16 *
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 */
32
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/delay.h>
36#include <linux/slab.h>
37#include <linux/kernel.h>
38#include <linux/proc_fs.h>
39#include <linux/timer.h>
40#include <linux/mempool.h>
41#include <linux/semaphore.h>
42
43#include <asm/ccwdev.h>
44#include <asm/io.h>
45#include <asm/atomic.h>
46#include <asm/timex.h>
47
48#include <asm/debug.h>
49#include <asm/s390_rdev.h>
50#include <asm/qdio.h>
51#include <asm/airq.h>
52
53#include "cio.h"
54#include "css.h"
55#include "device.h"
56#include "qdio.h"
57#include "ioasm.h"
58#include "chsc.h"
59
60/****************** MODULE PARAMETER VARIABLES ********************/
61MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
62MODULE_DESCRIPTION("QDIO base support version 2, " \
63 "Copyright 2000 IBM Corporation");
64MODULE_LICENSE("GPL");
65
66/******************** HERE WE GO ***********************************/
67
68static const char version[] = "QDIO base support version 2";
69
70static int qdio_performance_stats = 0;
71static int proc_perf_file_registration;
72static struct qdio_perf_stats perf_stats;
73
74static int hydra_thinints;
75static int is_passthrough = 0;
76static int omit_svs;
77
78static int indicator_used[INDICATORS_PER_CACHELINE];
79static __u32 * volatile indicators;
80static __u32 volatile spare_indicator;
81static atomic_t spare_indicator_usecount;
82#define QDIO_MEMPOOL_SCSSC_ELEMENTS 2
83static mempool_t *qdio_mempool_scssc;
84static struct kmem_cache *qdio_q_cache;
85
86static debug_info_t *qdio_dbf_setup;
87static debug_info_t *qdio_dbf_sbal;
88static debug_info_t *qdio_dbf_trace;
89static debug_info_t *qdio_dbf_sense;
90#ifdef CONFIG_QDIO_DEBUG
91static debug_info_t *qdio_dbf_slsb_out;
92static debug_info_t *qdio_dbf_slsb_in;
93#endif /* CONFIG_QDIO_DEBUG */
94
95/* iQDIO stuff: */
96static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
97 during a while loop */
98static DEFINE_SPINLOCK(ttiq_list_lock);
99static void *tiqdio_ind;
100static void tiqdio_tl(unsigned long);
101static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
102
103/* not a macro, as one of the arguments is atomic_read */
104static inline int
105qdio_min(int a,int b)
106{
107 if (a<b)
108 return a;
109 else
110 return b;
111}
112
113/***************** SCRUBBER HELPER ROUTINES **********************/
114#ifdef CONFIG_64BIT
115static inline void qdio_perf_stat_inc(atomic64_t *count)
116{
117 if (qdio_performance_stats)
118 atomic64_inc(count);
119}
120
121static inline void qdio_perf_stat_dec(atomic64_t *count)
122{
123 if (qdio_performance_stats)
124 atomic64_dec(count);
125}
126#else /* CONFIG_64BIT */
127static inline void qdio_perf_stat_inc(atomic_t *count)
128{
129 if (qdio_performance_stats)
130 atomic_inc(count);
131}
132
133static inline void qdio_perf_stat_dec(atomic_t *count)
134{
135 if (qdio_performance_stats)
136 atomic_dec(count);
137}
138#endif /* CONFIG_64BIT */
139
140static inline __u64
141qdio_get_micros(void)
142{
143 return (get_clock() >> 12); /* time>>12 is microseconds */
144}
145
146/*
147 * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve
148 * the q in any case, so that we'll not be interrupted when we are in
149 * qdio_mark_tiq... shouldn't have a really bad impact, as reserving almost
150 * ever works (last famous words)
151 */
152static inline int
153qdio_reserve_q(struct qdio_q *q)
154{
155 return atomic_add_return(1,&q->use_count) - 1;
156}
157
158static inline void
159qdio_release_q(struct qdio_q *q)
160{
161 atomic_dec(&q->use_count);
162}
163
164/*check ccq */
165static int
166qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
167{
168 char dbf_text[15];
169
170 if (ccq == 0 || ccq == 32)
171 return 0;
172 if (ccq == 96 || ccq == 97)
173 return 1;
174 /*notify devices immediately*/
175 sprintf(dbf_text,"%d", ccq);
176 QDIO_DBF_TEXT2(1,trace,dbf_text);
177 return -EIO;
178}
179/* EQBS: extract buffer states */
180static int
181qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
182 unsigned int *start, unsigned int *cnt)
183{
184 struct qdio_irq *irq;
185 unsigned int tmp_cnt, q_no, ccq;
186 int rc ;
187 char dbf_text[15];
188
189 ccq = 0;
190 tmp_cnt = *cnt;
191 irq = (struct qdio_irq*)q->irq_ptr;
192 q_no = q->q_no;
193 if(!q->is_input_q)
194 q_no += irq->no_input_qs;
195again:
196 ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
197 rc = qdio_check_ccq(q, ccq);
198 if ((ccq == 96) && (tmp_cnt != *cnt))
199 rc = 0;
200 if (rc == 1) {
201 QDIO_DBF_TEXT5(1,trace,"eqAGAIN");
202 goto again;
203 }
204 if (rc < 0) {
205 QDIO_DBF_TEXT2(1,trace,"eqberr");
206 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
207 QDIO_DBF_TEXT2(1,trace,dbf_text);
208 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
209 QDIO_STATUS_LOOK_FOR_ERROR,
210 0, 0, 0, -1, -1, q->int_parm);
211 return 0;
212 }
213 return (tmp_cnt - *cnt);
214}
215
216/* SQBS: set buffer states */
217static int
218qdio_do_sqbs(struct qdio_q *q, unsigned char state,
219 unsigned int *start, unsigned int *cnt)
220{
221 struct qdio_irq *irq;
222 unsigned int tmp_cnt, q_no, ccq;
223 int rc;
224 char dbf_text[15];
225
226 ccq = 0;
227 tmp_cnt = *cnt;
228 irq = (struct qdio_irq*)q->irq_ptr;
229 q_no = q->q_no;
230 if(!q->is_input_q)
231 q_no += irq->no_input_qs;
232again:
233 ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
234 rc = qdio_check_ccq(q, ccq);
235 if (rc == 1) {
236 QDIO_DBF_TEXT5(1,trace,"sqAGAIN");
237 goto again;
238 }
239 if (rc < 0) {
240 QDIO_DBF_TEXT3(1,trace,"sqberr");
241 sprintf(dbf_text,"%2x,%2x",tmp_cnt,*cnt);
242 QDIO_DBF_TEXT3(1,trace,dbf_text);
243 sprintf(dbf_text,"%d,%d",ccq,q_no);
244 QDIO_DBF_TEXT3(1,trace,dbf_text);
245 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
246 QDIO_STATUS_LOOK_FOR_ERROR,
247 0, 0, 0, -1, -1, q->int_parm);
248 return 0;
249 }
250 return (tmp_cnt - *cnt);
251}
252
253static inline int
254qdio_set_slsb(struct qdio_q *q, unsigned int *bufno,
255 unsigned char state, unsigned int *count)
256{
257 volatile char *slsb;
258 struct qdio_irq *irq;
259
260 irq = (struct qdio_irq*)q->irq_ptr;
261 if (!irq->is_qebsm) {
262 slsb = (char *)&q->slsb.acc.val[(*bufno)];
263 xchg(slsb, state);
264 return 1;
265 }
266 return qdio_do_sqbs(q, state, bufno, count);
267}
268
269#ifdef CONFIG_QDIO_DEBUG
270static inline void
271qdio_trace_slsb(struct qdio_q *q)
272{
273 if (q->queue_type==QDIO_TRACE_QTYPE) {
274 if (q->is_input_q)
275 QDIO_DBF_HEX2(0,slsb_in,&q->slsb,
276 QDIO_MAX_BUFFERS_PER_Q);
277 else
278 QDIO_DBF_HEX2(0,slsb_out,&q->slsb,
279 QDIO_MAX_BUFFERS_PER_Q);
280 }
281}
282#endif
283
284static inline int
285set_slsb(struct qdio_q *q, unsigned int *bufno,
286 unsigned char state, unsigned int *count)
287{
288 int rc;
289#ifdef CONFIG_QDIO_DEBUG
290 qdio_trace_slsb(q);
291#endif
292 rc = qdio_set_slsb(q, bufno, state, count);
293#ifdef CONFIG_QDIO_DEBUG
294 qdio_trace_slsb(q);
295#endif
296 return rc;
297}
298static inline int
299qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
300 unsigned int gpr3)
301{
302 int cc;
303
304 QDIO_DBF_TEXT4(0,trace,"sigasync");
305 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
306
307 qdio_perf_stat_inc(&perf_stats.siga_syncs);
308
309 cc = do_siga_sync(q->schid, gpr2, gpr3);
310 if (cc)
311 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
312
313 return cc;
314}
315
316static inline int
317qdio_siga_sync_q(struct qdio_q *q)
318{
319 if (q->is_input_q)
320 return qdio_siga_sync(q, 0, q->mask);
321 return qdio_siga_sync(q, q->mask, 0);
322}
323
324static int
325__do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
326{
327 struct qdio_irq *irq;
328 unsigned int fc = 0;
329 unsigned long schid;
330
331 irq = (struct qdio_irq *) q->irq_ptr;
332 if (!irq->is_qebsm)
333 schid = *((u32 *)&q->schid);
334 else {
335 schid = irq->sch_token;
336 fc |= 0x80;
337 }
338 return do_siga_output(schid, q->mask, busy_bit, fc);
339}
340
341/*
342 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
343 * an access exception
344 */
345static int
346qdio_siga_output(struct qdio_q *q)
347{
348 int cc;
349 __u32 busy_bit;
350 __u64 start_time=0;
351
352 qdio_perf_stat_inc(&perf_stats.siga_outs);
353
354 QDIO_DBF_TEXT4(0,trace,"sigaout");
355 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
356
357 for (;;) {
358 cc = __do_siga_output(q, &busy_bit);
359//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
360 if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
361 if (!start_time)
362 start_time=NOW;
363 if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE)
364 break;
365 } else
366 break;
367 }
368
369 if ((cc==2) && (busy_bit))
370 cc |= QDIO_SIGA_ERROR_B_BIT_SET;
371
372 if (cc)
373 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
374
375 return cc;
376}
377
378static int
379qdio_siga_input(struct qdio_q *q)
380{
381 int cc;
382
383 QDIO_DBF_TEXT4(0,trace,"sigain");
384 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
385
386 qdio_perf_stat_inc(&perf_stats.siga_ins);
387
388 cc = do_siga_input(q->schid, q->mask);
389
390 if (cc)
391 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
392
393 return cc;
394}
395
396/* locked by the locks in qdio_activate and qdio_cleanup */
397static __u32 *
398qdio_get_indicator(void)
399{
400 int i;
401
402 for (i = 0; i < INDICATORS_PER_CACHELINE; i++)
403 if (!indicator_used[i]) {
404 indicator_used[i]=1;
405 return indicators+i;
406 }
407 atomic_inc(&spare_indicator_usecount);
408 return (__u32 * volatile) &spare_indicator;
409}
410
411/* locked by the locks in qdio_activate and qdio_cleanup */
412static void
413qdio_put_indicator(__u32 *addr)
414{
415 int i;
416
417 if ( (addr) && (addr!=&spare_indicator) ) {
418 i=addr-indicators;
419 indicator_used[i]=0;
420 }
421 if (addr == &spare_indicator)
422 atomic_dec(&spare_indicator_usecount);
423}
424
425static inline void
426tiqdio_clear_summary_bit(__u32 *location)
427{
428 QDIO_DBF_TEXT5(0,trace,"clrsummb");
429 QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
430
431 xchg(location,0);
432}
433
434static inline void
435tiqdio_set_summary_bit(__u32 *location)
436{
437 QDIO_DBF_TEXT5(0,trace,"setsummb");
438 QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
439
440 xchg(location,-1);
441}
442
443static inline void
444tiqdio_sched_tl(void)
445{
446 tasklet_hi_schedule(&tiqdio_tasklet);
447}
448
449static void
450qdio_mark_tiq(struct qdio_q *q)
451{
452 unsigned long flags;
453
454 QDIO_DBF_TEXT4(0,trace,"mark iq");
455 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
456
457 spin_lock_irqsave(&ttiq_list_lock,flags);
458 if (unlikely(atomic_read(&q->is_in_shutdown)))
459 goto out_unlock;
460
461 if (!q->is_input_q)
462 goto out_unlock;
463
464 if ((q->list_prev) || (q->list_next))
465 goto out_unlock;
466
467 if (!tiq_list) {
468 tiq_list=q;
469 q->list_prev=q;
470 q->list_next=q;
471 } else {
472 q->list_next=tiq_list;
473 q->list_prev=tiq_list->list_prev;
474 tiq_list->list_prev->list_next=q;
475 tiq_list->list_prev=q;
476 }
477 spin_unlock_irqrestore(&ttiq_list_lock,flags);
478
479 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
480 tiqdio_sched_tl();
481 return;
482out_unlock:
483 spin_unlock_irqrestore(&ttiq_list_lock,flags);
484 return;
485}
486
487static inline void
488qdio_mark_q(struct qdio_q *q)
489{
490 QDIO_DBF_TEXT4(0,trace,"mark q");
491 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
492
493 if (unlikely(atomic_read(&q->is_in_shutdown)))
494 return;
495
496 tasklet_schedule(&q->tasklet);
497}
498
499static int
500qdio_stop_polling(struct qdio_q *q)
501{
502#ifdef QDIO_USE_PROCESSING_STATE
503 unsigned int tmp, gsf, count = 1;
504 unsigned char state = 0;
505 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
506
507 if (!atomic_xchg(&q->polling,0))
508 return 1;
509
510 QDIO_DBF_TEXT4(0,trace,"stoppoll");
511 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
512
513 /* show the card that we are not polling anymore */
514 if (!q->is_input_q)
515 return 1;
516
517 tmp = gsf = GET_SAVED_FRONTIER(q);
518 tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) );
519 set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count);
520
521 /*
522 * we don't issue this SYNC_MEMORY, as we trust Rick T and
523 * moreover will not use the PROCESSING state under VM, so
524 * q->polling was 0 anyway
525 */
526 /*SYNC_MEMORY;*/
527 if (irq->is_qebsm) {
528 count = 1;
529 qdio_do_eqbs(q, &state, &gsf, &count);
530 } else
531 state = q->slsb.acc.val[gsf];
532 if (state != SLSB_P_INPUT_PRIMED)
533 return 1;
534 /*
535 * set our summary bit again, as otherwise there is a
536 * small window we can miss between resetting it and
537 * checking for PRIMED state
538 */
539 if (q->is_thinint_q)
540 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
541 return 0;
542
543#else /* QDIO_USE_PROCESSING_STATE */
544 return 1;
545#endif /* QDIO_USE_PROCESSING_STATE */
546}
547
548/*
549 * see the comment in do_QDIO and before qdio_reserve_q about the
550 * sophisticated locking outside of unmark_q, so that we don't need to
551 * disable the interrupts :-)
552*/
553static void
554qdio_unmark_q(struct qdio_q *q)
555{
556 unsigned long flags;
557
558 QDIO_DBF_TEXT4(0,trace,"unmark q");
559 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
560
561 if ((!q->list_prev)||(!q->list_next))
562 return;
563
564 if ((q->is_thinint_q)&&(q->is_input_q)) {
565 /* iQDIO */
566 spin_lock_irqsave(&ttiq_list_lock,flags);
567 /* in case cleanup has done this already and simultanously
568 * qdio_unmark_q is called from the interrupt handler, we've
569 * got to check this in this specific case again */
570 if ((!q->list_prev)||(!q->list_next))
571 goto out;
572 if (q->list_next==q) {
573 /* q was the only interesting q */
574 tiq_list=NULL;
575 q->list_next=NULL;
576 q->list_prev=NULL;
577 } else {
578 q->list_next->list_prev=q->list_prev;
579 q->list_prev->list_next=q->list_next;
580 tiq_list=q->list_next;
581 q->list_next=NULL;
582 q->list_prev=NULL;
583 }
584out:
585 spin_unlock_irqrestore(&ttiq_list_lock,flags);
586 }
587}
588
589static inline unsigned long
590tiqdio_clear_global_summary(void)
591{
592 unsigned long time;
593
594 QDIO_DBF_TEXT5(0,trace,"clrglobl");
595
596 time = do_clear_global_summary();
597
598 QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long));
599
600 return time;
601}
602
603
604/************************* OUTBOUND ROUTINES *******************************/
605static int
606qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q)
607{
608 struct qdio_irq *irq;
609 unsigned char state;
610 unsigned int cnt, count, ftc;
611
612 irq = (struct qdio_irq *) q->irq_ptr;
613 if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis))
614 SYNC_MEMORY;
615
616 ftc = q->first_to_check;
617 count = qdio_min(atomic_read(&q->number_of_buffers_used),
618 (QDIO_MAX_BUFFERS_PER_Q-1));
619 if (count == 0)
620 return q->first_to_check;
621 cnt = qdio_do_eqbs(q, &state, &ftc, &count);
622 if (cnt == 0)
623 return q->first_to_check;
624 switch (state) {
625 case SLSB_P_OUTPUT_ERROR:
626 QDIO_DBF_TEXT3(0,trace,"outperr");
627 atomic_sub(cnt , &q->number_of_buffers_used);
628 if (q->qdio_error)
629 q->error_status_flags |=
630 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
631 q->qdio_error = SLSB_P_OUTPUT_ERROR;
632 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
633 q->first_to_check = ftc;
634 break;
635 case SLSB_P_OUTPUT_EMPTY:
636 QDIO_DBF_TEXT5(0,trace,"outpempt");
637 atomic_sub(cnt, &q->number_of_buffers_used);
638 q->first_to_check = ftc;
639 break;
640 case SLSB_CU_OUTPUT_PRIMED:
641 /* all buffers primed */
642 QDIO_DBF_TEXT5(0,trace,"outpprim");
643 break;
644 default:
645 break;
646 }
647 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
648 return q->first_to_check;
649}
650
651static int
652qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
653{
654 struct qdio_irq *irq;
655 unsigned char state;
656 int tmp, ftc, count, cnt;
657 char dbf_text[15];
658
659
660 irq = (struct qdio_irq *) q->irq_ptr;
661 ftc = q->first_to_check;
662 count = qdio_min(atomic_read(&q->number_of_buffers_used),
663 (QDIO_MAX_BUFFERS_PER_Q-1));
664 if (count == 0)
665 return q->first_to_check;
666 cnt = qdio_do_eqbs(q, &state, &ftc, &count);
667 if (cnt == 0)
668 return q->first_to_check;
669 switch (state) {
670 case SLSB_P_INPUT_ERROR :
671#ifdef CONFIG_QDIO_DEBUG
672 QDIO_DBF_TEXT3(1,trace,"inperr");
673 sprintf(dbf_text,"%2x,%2x",ftc,count);
674 QDIO_DBF_TEXT3(1,trace,dbf_text);
675#endif /* CONFIG_QDIO_DEBUG */
676 if (q->qdio_error)
677 q->error_status_flags |=
678 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
679 q->qdio_error = SLSB_P_INPUT_ERROR;
680 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
681 atomic_sub(cnt, &q->number_of_buffers_used);
682 q->first_to_check = ftc;
683 break;
684 case SLSB_P_INPUT_PRIMED :
685 QDIO_DBF_TEXT3(0,trace,"inptprim");
686 sprintf(dbf_text,"%2x,%2x",ftc,count);
687 QDIO_DBF_TEXT3(1,trace,dbf_text);
688 tmp = 0;
689 ftc = q->first_to_check;
690#ifdef QDIO_USE_PROCESSING_STATE
691 if (cnt > 1) {
692 cnt -= 1;
693 tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
694 if (!tmp)
695 break;
696 }
697 cnt = 1;
698 tmp += set_slsb(q, &ftc,
699 SLSB_P_INPUT_PROCESSING, &cnt);
700 atomic_set(&q->polling, 1);
701#else
702 tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
703#endif
704 atomic_sub(tmp, &q->number_of_buffers_used);
705 q->first_to_check = ftc;
706 break;
707 case SLSB_CU_INPUT_EMPTY:
708 case SLSB_P_INPUT_NOT_INIT:
709 case SLSB_P_INPUT_PROCESSING:
710 QDIO_DBF_TEXT5(0,trace,"inpnipro");
711 break;
712 default:
713 break;
714 }
715 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
716 return q->first_to_check;
717}
718
719static int
720qdio_get_outbound_buffer_frontier(struct qdio_q *q)
721{
722 struct qdio_irq *irq;
723 volatile char *slsb;
724 unsigned int count = 1;
725 int first_not_to_check, f, f_mod_no;
726 char dbf_text[15];
727
728 QDIO_DBF_TEXT4(0,trace,"getobfro");
729 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
730
731 irq = (struct qdio_irq *) q->irq_ptr;
732 if (irq->is_qebsm)
733 return qdio_qebsm_get_outbound_buffer_frontier(q);
734
735 slsb=&q->slsb.acc.val[0];
736 f_mod_no=f=q->first_to_check;
737 /*
738 * f points to already processed elements, so f+no_used is correct...
739 * ... but: we don't check 128 buffers, as otherwise
740 * qdio_has_outbound_q_moved would return 0
741 */
742 first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
743 (QDIO_MAX_BUFFERS_PER_Q-1));
744
745 if (((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) ||
746 (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH))
747 SYNC_MEMORY;
748
749check_next:
750 if (f==first_not_to_check)
751 goto out;
752
753 switch(slsb[f_mod_no]) {
754
755 /* the adapter has not fetched the output yet */
756 case SLSB_CU_OUTPUT_PRIMED:
757 QDIO_DBF_TEXT5(0,trace,"outpprim");
758 break;
759
760 /* the adapter got it */
761 case SLSB_P_OUTPUT_EMPTY:
762 atomic_dec(&q->number_of_buffers_used);
763 f++;
764 f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
765 QDIO_DBF_TEXT5(0,trace,"outpempt");
766 goto check_next;
767
768 case SLSB_P_OUTPUT_ERROR:
769 QDIO_DBF_TEXT3(0,trace,"outperr");
770 sprintf(dbf_text,"%x-%x-%x",f_mod_no,
771 q->sbal[f_mod_no]->element[14].sbalf.value,
772 q->sbal[f_mod_no]->element[15].sbalf.value);
773 QDIO_DBF_TEXT3(1,trace,dbf_text);
774 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
775
776 /* kind of process the buffer */
777 set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count);
778
779 /*
780 * we increment the frontier, as this buffer
781 * was processed obviously
782 */
783 atomic_dec(&q->number_of_buffers_used);
784 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
785
786 if (q->qdio_error)
787 q->error_status_flags|=
788 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
789 q->qdio_error=SLSB_P_OUTPUT_ERROR;
790 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
791
792 break;
793
794 /* no new buffers */
795 default:
796 QDIO_DBF_TEXT5(0,trace,"outpni");
797 }
798out:
799 return (q->first_to_check=f_mod_no);
800}
801
802/* all buffers are processed */
803static int
804qdio_is_outbound_q_done(struct qdio_q *q)
805{
806 int no_used;
807#ifdef CONFIG_QDIO_DEBUG
808 char dbf_text[15];
809#endif
810
811 no_used=atomic_read(&q->number_of_buffers_used);
812
813#ifdef CONFIG_QDIO_DEBUG
814 if (no_used) {
815 sprintf(dbf_text,"oqisnt%02x",no_used);
816 QDIO_DBF_TEXT4(0,trace,dbf_text);
817 } else {
818 QDIO_DBF_TEXT4(0,trace,"oqisdone");
819 }
820 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
821#endif /* CONFIG_QDIO_DEBUG */
822 return (no_used==0);
823}
824
825static int
826qdio_has_outbound_q_moved(struct qdio_q *q)
827{
828 int i;
829
830 i=qdio_get_outbound_buffer_frontier(q);
831
832 if ( (i!=GET_SAVED_FRONTIER(q)) ||
833 (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
834 SAVE_FRONTIER(q,i);
835 QDIO_DBF_TEXT4(0,trace,"oqhasmvd");
836 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
837 return 1;
838 } else {
839 QDIO_DBF_TEXT4(0,trace,"oqhsntmv");
840 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
841 return 0;
842 }
843}
844
845static void
846qdio_kick_outbound_q(struct qdio_q *q)
847{
848 int result;
849#ifdef CONFIG_QDIO_DEBUG
850 char dbf_text[15];
851
852 QDIO_DBF_TEXT4(0,trace,"kickoutq");
853 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
854#endif /* CONFIG_QDIO_DEBUG */
855
856 if (!q->siga_out)
857 return;
858
859 /* here's the story with cc=2 and busy bit set (thanks, Rick):
860 * VM's CP could present us cc=2 and busy bit set on SIGA-write
861 * during reconfiguration of their Guest LAN (only in HIPERS mode,
862 * QDIO mode is asynchronous -- cc=2 and busy bit there will take
863 * the queues down immediately; and not being under VM we have a
864 * problem on cc=2 and busy bit set right away).
865 *
866 * Therefore qdio_siga_output will try for a short time constantly,
867 * if such a condition occurs. If it doesn't change, it will
868 * increase the busy_siga_counter and save the timestamp, and
869 * schedule the queue for later processing (via mark_q, using the
870 * queue tasklet). __qdio_outbound_processing will check out the
871 * counter. If non-zero, it will call qdio_kick_outbound_q as often
872 * as the value of the counter. This will attempt further SIGA
873 * instructions. For each successful SIGA, the counter is
874 * decreased, for failing SIGAs the counter remains the same, after
875 * all.
876 * After some time of no movement, qdio_kick_outbound_q will
877 * finally fail and reflect corresponding error codes to call
878 * the upper layer module and have it take the queues down.
879 *
880 * Note that this is a change from the original HiperSockets design
881 * (saying cc=2 and busy bit means take the queues down), but in
882 * these days Guest LAN didn't exist... excessive cc=2 with busy bit
883 * conditions will still take the queues down, but the threshold is
884 * higher due to the Guest LAN environment.
885 */
886
887
888 result=qdio_siga_output(q);
889
890 switch (result) {
891 case 0:
892 /* went smooth this time, reset timestamp */
893#ifdef CONFIG_QDIO_DEBUG
894 QDIO_DBF_TEXT3(0,trace,"cc2reslv");
895 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
896 atomic_read(&q->busy_siga_counter));
897 QDIO_DBF_TEXT3(0,trace,dbf_text);
898#endif /* CONFIG_QDIO_DEBUG */
899 q->timing.busy_start=0;
900 break;
901 case (2|QDIO_SIGA_ERROR_B_BIT_SET):
902 /* cc=2 and busy bit: */
903 atomic_inc(&q->busy_siga_counter);
904
905 /* if the last siga was successful, save
906 * timestamp here */
907 if (!q->timing.busy_start)
908 q->timing.busy_start=NOW;
909
910 /* if we're in time, don't touch error_status_flags
911 * and siga_error */
912 if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
913 qdio_mark_q(q);
914 break;
915 }
916 QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
917#ifdef CONFIG_QDIO_DEBUG
918 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
919 atomic_read(&q->busy_siga_counter));
920 QDIO_DBF_TEXT3(0,trace,dbf_text);
921#endif /* CONFIG_QDIO_DEBUG */
922 /* else fallthrough and report error */
923 default:
924 /* for plain cc=1, 2 or 3: */
925 if (q->siga_error)
926 q->error_status_flags|=
927 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
928 q->error_status_flags|=
929 QDIO_STATUS_LOOK_FOR_ERROR;
930 q->siga_error=result;
931 }
932}
933
934static void
935qdio_kick_outbound_handler(struct qdio_q *q)
936{
937 int start, end, real_end, count;
938#ifdef CONFIG_QDIO_DEBUG
939 char dbf_text[15];
940#endif
941
942 start = q->first_element_to_kick;
943 /* last_move_ftc was just updated */
944 real_end = GET_SAVED_FRONTIER(q);
945 end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)&
946 (QDIO_MAX_BUFFERS_PER_Q-1);
947 count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
948 (QDIO_MAX_BUFFERS_PER_Q-1);
949
950#ifdef CONFIG_QDIO_DEBUG
951 QDIO_DBF_TEXT4(0,trace,"kickouth");
952 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
953
954 sprintf(dbf_text,"s=%2xc=%2x",start,count);
955 QDIO_DBF_TEXT4(0,trace,dbf_text);
956#endif /* CONFIG_QDIO_DEBUG */
957
958 if (q->state==QDIO_IRQ_STATE_ACTIVE)
959 q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
960 q->error_status_flags,
961 q->qdio_error,q->siga_error,q->q_no,start,count,
962 q->int_parm);
963
964 /* for the next time: */
965 q->first_element_to_kick=real_end;
966 q->qdio_error=0;
967 q->siga_error=0;
968 q->error_status_flags=0;
969}
970
971static void
972__qdio_outbound_processing(struct qdio_q *q)
973{
974 int siga_attempts;
975
976 QDIO_DBF_TEXT4(0,trace,"qoutproc");
977 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
978
979 if (unlikely(qdio_reserve_q(q))) {
980 qdio_release_q(q);
981 qdio_perf_stat_inc(&perf_stats.outbound_tl_runs_resched);
982 /* as we're sissies, we'll check next time */
983 if (likely(!atomic_read(&q->is_in_shutdown))) {
984 qdio_mark_q(q);
985 QDIO_DBF_TEXT4(0,trace,"busy,agn");
986 }
987 return;
988 }
989 qdio_perf_stat_inc(&perf_stats.outbound_tl_runs);
990 qdio_perf_stat_inc(&perf_stats.tl_runs);
991
992 /* see comment in qdio_kick_outbound_q */
993 siga_attempts=atomic_read(&q->busy_siga_counter);
994 while (siga_attempts) {
995 atomic_dec(&q->busy_siga_counter);
996 qdio_kick_outbound_q(q);
997 siga_attempts--;
998 }
999
1000 if (qdio_has_outbound_q_moved(q))
1001 qdio_kick_outbound_handler(q);
1002
1003 if (q->queue_type == QDIO_ZFCP_QFMT) {
1004 if ((!q->hydra_gives_outbound_pcis) &&
1005 (!qdio_is_outbound_q_done(q)))
1006 qdio_mark_q(q);
1007 }
1008 else if (((!q->is_iqdio_q) && (!q->is_pci_out)) ||
1009 (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH)) {
1010 /*
1011 * make sure buffer switch from PRIMED to EMPTY is noticed
1012 * and outbound_handler is called
1013 */
1014 if (qdio_is_outbound_q_done(q)) {
1015 del_timer(&q->timer);
1016 } else {
1017 if (!timer_pending(&q->timer))
1018 mod_timer(&q->timer, jiffies +
1019 QDIO_FORCE_CHECK_TIMEOUT);
1020 }
1021 }
1022
1023 qdio_release_q(q);
1024}
1025
1026static void
1027qdio_outbound_processing(unsigned long q)
1028{
1029 __qdio_outbound_processing((struct qdio_q *) q);
1030}
1031
1032/************************* INBOUND ROUTINES *******************************/
1033
1034
1035static int
1036qdio_get_inbound_buffer_frontier(struct qdio_q *q)
1037{
1038 struct qdio_irq *irq;
1039 int f,f_mod_no;
1040 volatile char *slsb;
1041 unsigned int count = 1;
1042 int first_not_to_check;
1043#ifdef CONFIG_QDIO_DEBUG
1044 char dbf_text[15];
1045#endif /* CONFIG_QDIO_DEBUG */
1046#ifdef QDIO_USE_PROCESSING_STATE
1047 int last_position=-1;
1048#endif /* QDIO_USE_PROCESSING_STATE */
1049
1050 QDIO_DBF_TEXT4(0,trace,"getibfro");
1051 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1052
1053 irq = (struct qdio_irq *) q->irq_ptr;
1054 if (irq->is_qebsm)
1055 return qdio_qebsm_get_inbound_buffer_frontier(q);
1056
1057 slsb=&q->slsb.acc.val[0];
1058 f_mod_no=f=q->first_to_check;
1059 /*
1060 * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved
1061 * would return 0
1062 */
1063 first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
1064 (QDIO_MAX_BUFFERS_PER_Q-1));
1065
1066 /*
1067 * we don't use this one, as a PCI or we after a thin interrupt
1068 * will sync the queues
1069 */
1070 /* SYNC_MEMORY;*/
1071
1072check_next:
1073 f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
1074 if (f==first_not_to_check)
1075 goto out;
1076 switch (slsb[f_mod_no]) {
1077
1078 /* CU_EMPTY means frontier is reached */
1079 case SLSB_CU_INPUT_EMPTY:
1080 QDIO_DBF_TEXT5(0,trace,"inptempt");
1081 break;
1082
1083 /* P_PRIMED means set slsb to P_PROCESSING and move on */
1084 case SLSB_P_INPUT_PRIMED:
1085 QDIO_DBF_TEXT5(0,trace,"inptprim");
1086
1087#ifdef QDIO_USE_PROCESSING_STATE
1088 /*
1089 * as soon as running under VM, polling the input queues will
1090 * kill VM in terms of CP overhead
1091 */
1092 if (q->siga_sync) {
1093 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1094 } else {
1095 /* set the previous buffer to NOT_INIT. The current
1096 * buffer will be set to PROCESSING at the end of
1097 * this function to avoid further interrupts. */
1098 if (last_position>=0)
1099 set_slsb(q, &last_position,
1100 SLSB_P_INPUT_NOT_INIT, &count);
1101 atomic_set(&q->polling,1);
1102 last_position=f_mod_no;
1103 }
1104#else /* QDIO_USE_PROCESSING_STATE */
1105 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1106#endif /* QDIO_USE_PROCESSING_STATE */
1107 /*
1108 * not needed, as the inbound queue will be synced on the next
1109 * siga-r, resp. tiqdio_is_inbound_q_done will do the siga-s
1110 */
1111 /*SYNC_MEMORY;*/
1112 f++;
1113 atomic_dec(&q->number_of_buffers_used);
1114 goto check_next;
1115
1116 case SLSB_P_INPUT_NOT_INIT:
1117 case SLSB_P_INPUT_PROCESSING:
1118 QDIO_DBF_TEXT5(0,trace,"inpnipro");
1119 break;
1120
1121 /* P_ERROR means frontier is reached, break and report error */
1122 case SLSB_P_INPUT_ERROR:
1123#ifdef CONFIG_QDIO_DEBUG
1124 sprintf(dbf_text,"inperr%2x",f_mod_no);
1125 QDIO_DBF_TEXT3(1,trace,dbf_text);
1126#endif /* CONFIG_QDIO_DEBUG */
1127 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
1128
1129 /* kind of process the buffer */
1130 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1131
1132 if (q->qdio_error)
1133 q->error_status_flags|=
1134 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
1135 q->qdio_error=SLSB_P_INPUT_ERROR;
1136 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
1137
1138 /* we increment the frontier, as this buffer
1139 * was processed obviously */
1140 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1141 atomic_dec(&q->number_of_buffers_used);
1142
1143#ifdef QDIO_USE_PROCESSING_STATE
1144 last_position=-1;
1145#endif /* QDIO_USE_PROCESSING_STATE */
1146
1147 break;
1148
1149 /* everything else means frontier not changed (HALTED or so) */
1150 default:
1151 break;
1152 }
1153out:
1154 q->first_to_check=f_mod_no;
1155
1156#ifdef QDIO_USE_PROCESSING_STATE
1157 if (last_position>=0)
1158 set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count);
1159#endif /* QDIO_USE_PROCESSING_STATE */
1160
1161 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
1162
1163 return q->first_to_check;
1164}
1165
1166static int
1167qdio_has_inbound_q_moved(struct qdio_q *q)
1168{
1169 int i;
1170
1171 i=qdio_get_inbound_buffer_frontier(q);
1172 if ( (i!=GET_SAVED_FRONTIER(q)) ||
1173 (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
1174 SAVE_FRONTIER(q,i);
1175 if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis))
1176 SAVE_TIMESTAMP(q);
1177
1178 QDIO_DBF_TEXT4(0,trace,"inhasmvd");
1179 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1180 return 1;
1181 } else {
1182 QDIO_DBF_TEXT4(0,trace,"inhsntmv");
1183 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1184 return 0;
1185 }
1186}
1187
/* means, no more buffers to be filled */
static int
tiqdio_is_inbound_q_done(struct qdio_q *q)
{
	int no_used;
	unsigned int start_buf, count;
	unsigned char state = 0;
	struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;

#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	no_used=atomic_read(&q->number_of_buffers_used);

	/* propagate the change from 82 to 80 through VM */
	SYNC_MEMORY;

#ifdef CONFIG_QDIO_DEBUG
	if (no_used) {
		sprintf(dbf_text,"iqisnt%02x",no_used);
		QDIO_DBF_TEXT4(0,trace,dbf_text);
	} else {
		QDIO_DBF_TEXT4(0,trace,"iniqisdo");
	}
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
#endif /* CONFIG_QDIO_DEBUG */

	/* no buffers in flight -> queue is done */
	if (!no_used)
		return 1;
	/* fetch the state of the frontier buffer: via EQBS when QEBSM
	 * is in use, directly from the SLSB array otherwise */
	if (irq->is_qebsm) {
		count = 1;
		start_buf = q->first_to_check;
		qdio_do_eqbs(q, &state, &start_buf, &count);
	} else
		state = q->slsb.acc.val[q->first_to_check];
	if (state != SLSB_P_INPUT_PRIMED)
		/*
		 * nothing more to do, if next buffer is not PRIMED.
		 * note that we did a SYNC_MEMORY before, that there
		 * has been a sychnronization.
		 * we will return 0 below, as there is nothing to do
		 * (stop_polling not necessary, as we have not been
		 * using the PROCESSING state
		 */
		return 0;

	/*
	 * ok, the next input buffer is primed. that means, that device state
	 * change indicator and adapter local summary are set, so we will find
	 * it next time.
	 * we will return 0 below, as there is nothing to do, except scheduling
	 * ourselves for the next time.
	 */
	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
	tiqdio_sched_tl();
	return 0;
}
1246
/*
 * decide whether a non-thinint input queue needs no further polling.
 * returns 1 when the queue counts as done, 0 when it should be
 * rescanned (poll time not over, or new work pending).
 */
static int
qdio_is_inbound_q_done(struct qdio_q *q)
{
	int no_used;
	unsigned int start_buf, count;
	unsigned char state = 0;
	struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;

#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	no_used=atomic_read(&q->number_of_buffers_used);

	/*
	 * we need that one for synchronization with the adapter, as it
	 * does a kind of PCI avoidance
	 */
	SYNC_MEMORY;

	/* no buffers in use -> nothing left to process */
	if (!no_used) {
		QDIO_DBF_TEXT4(0,trace,"inqisdnA");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		return 1;
	}
	/* state of the frontier buffer: via EQBS under QEBSM, from the
	 * SLSB array otherwise */
	if (irq->is_qebsm) {
		count = 1;
		start_buf = q->first_to_check;
		qdio_do_eqbs(q, &state, &start_buf, &count);
	} else
		state = q->slsb.acc.val[q->first_to_check];
	if (state == SLSB_P_INPUT_PRIMED) {
		/* we got something to do */
		QDIO_DBF_TEXT4(0,trace,"inqisntA");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		return 0;
	}

	/* on VM, we don't poll, so the q is always done here */
	if (q->siga_sync)
		return 1;
	if (q->hydra_gives_outbound_pcis)
		return 1;

	/*
	 * at this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing)
	 */
	/* done once we have been idle longer than the polling threshold */
	if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT4(0,trace,"inqisdon");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
		QDIO_DBF_TEXT4(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		return 1;
	} else {
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT4(0,trace,"inqisntd");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
		QDIO_DBF_TEXT4(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		return 0;
	}
}
1313
1314static void
1315qdio_kick_inbound_handler(struct qdio_q *q)
1316{
1317 int count, start, end, real_end, i;
1318#ifdef CONFIG_QDIO_DEBUG
1319 char dbf_text[15];
1320#endif
1321
1322 QDIO_DBF_TEXT4(0,trace,"kickinh");
1323 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1324
1325 start=q->first_element_to_kick;
1326 real_end=q->first_to_check;
1327 end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1328
1329 i=start;
1330 count=0;
1331 while (1) {
1332 count++;
1333 if (i==end)
1334 break;
1335 i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1336 }
1337
1338#ifdef CONFIG_QDIO_DEBUG
1339 sprintf(dbf_text,"s=%2xc=%2x",start,count);
1340 QDIO_DBF_TEXT4(0,trace,dbf_text);
1341#endif /* CONFIG_QDIO_DEBUG */
1342
1343 if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
1344 q->handler(q->cdev,
1345 QDIO_STATUS_INBOUND_INT|q->error_status_flags,
1346 q->qdio_error,q->siga_error,q->q_no,start,count,
1347 q->int_parm);
1348
1349 /* for the next time: */
1350 q->first_element_to_kick=real_end;
1351 q->qdio_error=0;
1352 q->siga_error=0;
1353 q->error_status_flags=0;
1354
1355 qdio_perf_stat_inc(&perf_stats.inbound_cnt);
1356}
1357
/*
 * process one thinint input queue: reserve it, synchronize state with
 * the adapter, service outbound-PCI-capable output queues piggybacked
 * on the thin interrupt, then kick the input handler if the frontier
 * moved.
 */
static void
__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
{
	struct qdio_irq *irq_ptr;
	struct qdio_q *oq;
	int i;

	QDIO_DBF_TEXT4(0,trace,"iqinproc");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	/*
	 * we first want to reserve the q, so that we know, that we don't
	 * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might
	 * be set
	 */
	if (unlikely(qdio_reserve_q(q))) {
		qdio_release_q(q);
		qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
		/*
		 * as we might just be about to stop polling, we make
		 * sure that we check again at least once more
		 */
		tiqdio_sched_tl();
		return;
	}
	qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs);
	if (unlikely(atomic_read(&q->is_in_shutdown))) {
		qdio_unmark_q(q);
		goto out;
	}

	/*
	 * we reset spare_ind_was_set, when the queue does not use the
	 * spare indicator
	 */
	if (spare_ind_was_set)
		spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);

	/* nothing signalled for this queue -> done */
	if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
		goto out;
	/*
	 * q->dev_st_chg_ind is the indicator, be it shared or not.
	 * only clear it, if indicator is non-shared
	 */
	if (q->dev_st_chg_ind != &spare_indicator)
		tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);

	/* synchronize with the adapter; how much to sync depends on
	 * which events the adapter already syncs on itself */
	if (q->hydra_gives_outbound_pcis) {
		if (!q->siga_sync_done_on_thinints) {
			SYNC_MEMORY_ALL;
		} else if (!q->siga_sync_done_on_outb_tis) {
			SYNC_MEMORY_ALL_OUTB;
		}
	} else {
		SYNC_MEMORY;
	}
	/*
	 * maybe we have to do work on our outbound queues... at least
	 * we have to check the outbound-int-capable thinint-capable
	 * queues
	 */
	if (q->hydra_gives_outbound_pcis) {
		irq_ptr = (struct qdio_irq*)q->irq_ptr;
		for (i=0;i<irq_ptr->no_output_qs;i++) {
			oq = irq_ptr->output_qs[i];
			if (!qdio_is_outbound_q_done(oq)) {
				qdio_perf_stat_dec(&perf_stats.tl_runs);
				__qdio_outbound_processing(oq);
			}
		}
	}

	if (!qdio_has_inbound_q_moved(q))
		goto out;

	qdio_kick_inbound_handler(q);
	if (tiqdio_is_inbound_q_done(q))
		if (!qdio_stop_polling(q)) {
			/*
			 * we set the flags to get into the stuff next time,
			 * see also comment in qdio_stop_polling
			 */
			tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
			tiqdio_sched_tl();
		}
out:
	qdio_release_q(q);
}
1446
1447static void
1448tiqdio_inbound_processing(unsigned long q)
1449{
1450 __tiqdio_inbound_processing((struct qdio_q *) q,
1451 atomic_read(&spare_indicator_usecount));
1452}
1453
/*
 * tasklet body for a non-thinint input queue: reserve the queue,
 * kick the handler for moved buffers (rescanning up to QDIO_Q_LAPS
 * times while polling), and re-mark the queue if polling continues.
 */
static void
__qdio_inbound_processing(struct qdio_q *q)
{
	int q_laps=0;

	QDIO_DBF_TEXT4(0,trace,"qinproc");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	if (unlikely(qdio_reserve_q(q))) {
		qdio_release_q(q);
		qdio_perf_stat_inc(&perf_stats.inbound_tl_runs_resched);
		/* as we're sissies, we'll check next time */
		if (likely(!atomic_read(&q->is_in_shutdown))) {
			qdio_mark_q(q);
			QDIO_DBF_TEXT4(0,trace,"busy,agn");
		}
		return;
	}
	qdio_perf_stat_inc(&perf_stats.inbound_tl_runs);
	qdio_perf_stat_inc(&perf_stats.tl_runs);

again:
	if (qdio_has_inbound_q_moved(q)) {
		qdio_kick_inbound_handler(q);
		if (!qdio_stop_polling(q)) {
			/* still polling: rescan immediately, but at
			 * most QDIO_Q_LAPS times in this run */
			q_laps++;
			if (q_laps<QDIO_Q_LAPS)
				goto again;
		}
		qdio_mark_q(q);
	} else {
		if (!qdio_is_inbound_q_done(q))
			/* means poll time is not yet over */
			qdio_mark_q(q);
	}

	qdio_release_q(q);
}
1492
static void
qdio_inbound_processing(unsigned long q)
{
	struct qdio_q *queue;

	/* tasklet entry point for non-thinint input queues */
	queue = (struct qdio_q *) q;
	__qdio_inbound_processing(queue);
}
1498
1499/************************* MAIN ROUTINES *******************************/
1500
1501#ifdef QDIO_USE_PROCESSING_STATE
/*
 * take queue q out of the PROCESSING state after a scan pass.
 * the return value steers the caller's loop (tiqdio_inbound_checks):
 *   0 - queue gone/busy, tasklet rescheduled; caller aborts the scan
 *   1 - indicator re-armed (lap budget exhausted); count a lap, advance
 *   2 - nothing to stop (VM, or polling already off); advance
 *   3 - polling stopped with lap budget left; count a lap, rescan
 */
static int
tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
{
	if (!q) {
		tiqdio_sched_tl();
		return 0;
	}

	/*
	 * under VM, we have not used the PROCESSING state, so no
	 * need to stop polling
	 */
	if (q->siga_sync)
		return 2;

	if (unlikely(qdio_reserve_q(q))) {
		qdio_release_q(q);
		qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
		/*
		 * as we might just be about to stop polling, we make
		 * sure that we check again at least once more
		 */

		/*
		 * sanity -- we'd get here without setting the
		 * dev st chg ind
		 */
		tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
		tiqdio_sched_tl();
		return 0;
	}
	if (qdio_stop_polling(q)) {
		qdio_release_q(q);
		return 2;
	}
	if (q_laps<QDIO_Q_LAPS-1) {
		qdio_release_q(q);
		return 3;
	}
	/*
	 * we set the flags to get into the stuff
	 * next time, see also comment in qdio_stop_polling
	 */
	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
	tiqdio_sched_tl();
	qdio_release_q(q);
	return 1;

}
1551#endif /* QDIO_USE_PROCESSING_STATE */
1552
/*
 * scan the circular list of thinint queues once; with
 * QDIO_USE_PROCESSING_STATE a second pass resets the PROCESSING
 * state on every queue (see tiqdio_reset_processing_state for the
 * meaning of its return codes).
 */
static void
tiqdio_inbound_checks(void)
{
	struct qdio_q *q;
	int spare_ind_was_set=0;
#ifdef QDIO_USE_PROCESSING_STATE
	int q_laps=0;
#endif /* QDIO_USE_PROCESSING_STATE */

	QDIO_DBF_TEXT4(0,trace,"iqdinbck");
	QDIO_DBF_TEXT5(0,trace,"iqlocsum");

#ifdef QDIO_USE_PROCESSING_STATE
again:
#endif /* QDIO_USE_PROCESSING_STATE */

	/* when the spare indicator is used and set, save that and clear it */
	if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) {
		spare_ind_was_set = 1;
		tiqdio_clear_summary_bit((__u32*)&spare_indicator);
	}

	/* first pass: process every queue on the thinint list */
	q=(struct qdio_q*)tiq_list;
	do {
		if (!q)
			break;
		__tiqdio_inbound_processing(q, spare_ind_was_set);
		q=(struct qdio_q*)q->list_next;
	} while (q!=(struct qdio_q*)tiq_list);

#ifdef QDIO_USE_PROCESSING_STATE
	/* second pass: take the queues out of the PROCESSING state */
	q=(struct qdio_q*)tiq_list;
	do {
		int ret;

		ret = tiqdio_reset_processing_state(q, q_laps);
		switch (ret) {
		case 0:
			return;
		case 1:
			q_laps++;
			/* fallthrough: also advance to the next queue */
		case 2:
			q = (struct qdio_q*)q->list_next;
			break;
		default:
			q_laps++;
			goto again;
		}
	} while (q!=(struct qdio_q*)tiq_list);
#endif /* QDIO_USE_PROCESSING_STATE */
}
1604
1605static void
1606tiqdio_tl(unsigned long data)
1607{
1608 QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
1609
1610 qdio_perf_stat_inc(&perf_stats.tl_runs);
1611
1612 tiqdio_inbound_checks();
1613}
1614
1615/********************* GENERAL HELPER_ROUTINES ***********************/
1616
1617static void
1618qdio_release_irq_memory(struct qdio_irq *irq_ptr)
1619{
1620 int i;
1621 struct qdio_q *q;
1622
1623 for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
1624 q = irq_ptr->input_qs[i];
1625 if (q) {
1626 free_page((unsigned long) q->slib);
1627 kmem_cache_free(qdio_q_cache, q);
1628 }
1629 q = irq_ptr->output_qs[i];
1630 if (q) {
1631 free_page((unsigned long) q->slib);
1632 kmem_cache_free(qdio_q_cache, q);
1633 }
1634 }
1635 free_page((unsigned long) irq_ptr->qdr);
1636 free_page((unsigned long) irq_ptr);
1637}
1638
1639static void
1640qdio_set_impl_params(struct qdio_irq *irq_ptr,
1641 unsigned int qib_param_field_format,
1642 /* pointer to 128 bytes or NULL, if no param field */
1643 unsigned char *qib_param_field,
1644 /* pointer to no_queues*128 words of data or NULL */
1645 unsigned int no_input_qs,
1646 unsigned int no_output_qs,
1647 unsigned long *input_slib_elements,
1648 unsigned long *output_slib_elements)
1649{
1650 int i,j;
1651
1652 if (!irq_ptr)
1653 return;
1654
1655 irq_ptr->qib.pfmt=qib_param_field_format;
1656 if (qib_param_field)
1657 memcpy(irq_ptr->qib.parm,qib_param_field,
1658 QDIO_MAX_BUFFERS_PER_Q);
1659
1660 if (input_slib_elements)
1661 for (i=0;i<no_input_qs;i++) {
1662 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1663 irq_ptr->input_qs[i]->slib->slibe[j].parms=
1664 input_slib_elements[
1665 i*QDIO_MAX_BUFFERS_PER_Q+j];
1666 }
1667 if (output_slib_elements)
1668 for (i=0;i<no_output_qs;i++) {
1669 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1670 irq_ptr->output_qs[i]->slib->slibe[j].parms=
1671 output_slib_elements[
1672 i*QDIO_MAX_BUFFERS_PER_Q+j];
1673 }
1674}
1675
1676static int
1677qdio_alloc_qs(struct qdio_irq *irq_ptr,
1678 int no_input_qs, int no_output_qs)
1679{
1680 int i;
1681 struct qdio_q *q;
1682
1683 for (i = 0; i < no_input_qs; i++) {
1684 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
1685 if (!q)
1686 return -ENOMEM;
1687 memset(q, 0, sizeof(*q));
1688
1689 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
1690 if (!q->slib) {
1691 kmem_cache_free(qdio_q_cache, q);
1692 return -ENOMEM;
1693 }
1694 irq_ptr->input_qs[i]=q;
1695 }
1696
1697 for (i = 0; i < no_output_qs; i++) {
1698 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
1699 if (!q)
1700 return -ENOMEM;
1701 memset(q, 0, sizeof(*q));
1702
1703 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
1704 if (!q->slib) {
1705 kmem_cache_free(qdio_q_cache, q);
1706 return -ENOMEM;
1707 }
1708 irq_ptr->output_qs[i]=q;
1709 }
1710 return 0;
1711}
1712
/*
 * wire up the previously allocated queue structures with the caller's
 * SBAL pointers, handlers and per-queue settings, and build the
 * slib/sl/slsb layout for each input and output queue
 */
static void
qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
	     int no_input_qs, int no_output_qs,
	     qdio_handler_t *input_handler,
	     qdio_handler_t *output_handler,
	     unsigned long int_parm,int q_format,
	     unsigned long flags,
	     void **inbound_sbals_array,
	     void **outbound_sbals_array)
{
	struct qdio_q *q;
	int i,j;
	char dbf_text[20]; /* see qdio_initialize */
	void *ptr;
	int available;

	sprintf(dbf_text,"qfqs%4x",cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	for (i=0;i<no_input_qs;i++) {
		q=irq_ptr->input_qs[i];

		/* wipe everything up to (not including) the slib
		 * pointer, which was set during allocation */
		memset(q,0,((char*)&q->slib)-((char*)q));
		sprintf(dbf_text,"in-q%4x",i);
		QDIO_DBF_TEXT0(0,setup,dbf_text);
		QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));

		/* slib occupies the first half of the page, sl the second */
		memset(q->slib,0,PAGE_SIZE);
		q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);

		available=0;

		/* take over the caller's buffer pointers */
		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sbal[j]=*(inbound_sbals_array++);

		q->queue_type=q_format;
		q->int_parm=int_parm;
		q->schid = irq_ptr->schid;
		q->irq_ptr = irq_ptr;
		q->cdev = cdev;
		q->mask=1<<(31-i);
		q->q_no=i;
		q->is_input_q=1;
		q->first_to_check=0;
		q->last_move_ftc=0;
		q->handler=input_handler;
		q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;

		/* q->is_thinint_q isn't valid at this time, but
		 * irq_ptr->is_thinint_irq is
		 */
		if (irq_ptr->is_thinint_irq)
			tasklet_init(&q->tasklet, tiqdio_inbound_processing,
				     (unsigned long) q);
		else
			tasklet_init(&q->tasklet, qdio_inbound_processing,
				     (unsigned long) q);

		/* actually this is not used for inbound queues. yet. */
		atomic_set(&q->busy_siga_counter,0);
		q->timing.busy_start=0;

/*		for (j=0;j<QDIO_STATS_NUMBER;j++)
			q->timing.last_transfer_times[j]=(qdio_get_micros()/
							  QDIO_STATS_NUMBER)*j;
		q->timing.last_transfer_index=QDIO_STATS_NUMBER-1;
*/

		/* fill in slib */
		if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba=
				 (unsigned long)(q->slib);
		q->slib->sla=(unsigned long)(q->sl);
		q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);

		/* fill in sl */
		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);

		QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
		ptr=(void*)q->sl;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)&q->slsb;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)q->sbal[0];
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));

		/* fill in slsb */
		if (!irq_ptr->is_qebsm) {
			unsigned int count = 1;
			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
				set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count);
		}
	}

	for (i=0;i<no_output_qs;i++) {
		q=irq_ptr->output_qs[i];
		memset(q,0,((char*)&q->slib)-((char*)q));

		sprintf(dbf_text,"outq%4x",i);
		QDIO_DBF_TEXT0(0,setup,dbf_text);
		QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));

		memset(q->slib,0,PAGE_SIZE);
		q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);

		available=0;

		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sbal[j]=*(outbound_sbals_array++);

		q->queue_type=q_format;
		/* the last of several IQD output queues becomes the
		 * asynchronous one */
		if ((q->queue_type == QDIO_IQDIO_QFMT) &&
		    (no_output_qs > 1) &&
		    (i == no_output_qs-1))
			q->queue_type = QDIO_IQDIO_QFMT_ASYNCH;
		q->int_parm=int_parm;
		q->is_input_q=0;
		q->is_pci_out = 0;
		q->schid = irq_ptr->schid;
		q->cdev = cdev;
		q->irq_ptr = irq_ptr;
		q->mask=1<<(31-i);
		q->q_no=i;
		q->first_to_check=0;
		q->last_move_ftc=0;
		q->handler=output_handler;

		tasklet_init(&q->tasklet, qdio_outbound_processing,
			     (unsigned long) q);
		setup_timer(&q->timer, qdio_outbound_processing,
			    (unsigned long) q);

		atomic_set(&q->busy_siga_counter,0);
		q->timing.busy_start=0;

		/* fill in slib */
		if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba=
				 (unsigned long)(q->slib);
		q->slib->sla=(unsigned long)(q->sl);
		q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);

		/* fill in sl */
		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);

		QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
		ptr=(void*)q->sl;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)&q->slsb;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)q->sbal[0];
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));

		/* fill in slsb */
		if (!irq_ptr->is_qebsm) {
			unsigned int count = 1;
			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
				set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count);
		}
	}
}
1873
1874static void
1875qdio_fill_thresholds(struct qdio_irq *irq_ptr,
1876 unsigned int no_input_qs,
1877 unsigned int no_output_qs,
1878 unsigned int min_input_threshold,
1879 unsigned int max_input_threshold,
1880 unsigned int min_output_threshold,
1881 unsigned int max_output_threshold)
1882{
1883 int i;
1884 struct qdio_q *q;
1885
1886 for (i=0;i<no_input_qs;i++) {
1887 q=irq_ptr->input_qs[i];
1888 q->timing.threshold=max_input_threshold;
1889/* for (j=0;j<QDIO_STATS_CLASSES;j++) {
1890 q->threshold_classes[j].threshold=
1891 min_input_threshold+
1892 (max_input_threshold-min_input_threshold)/
1893 QDIO_STATS_CLASSES;
1894 }
1895 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1896 }
1897 for (i=0;i<no_output_qs;i++) {
1898 q=irq_ptr->output_qs[i];
1899 q->timing.threshold=max_output_threshold;
1900/* for (j=0;j<QDIO_STATS_CLASSES;j++) {
1901 q->threshold_classes[j].threshold=
1902 min_output_threshold+
1903 (max_output_threshold-min_output_threshold)/
1904 QDIO_STATS_CLASSES;
1905 }
1906 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1907 }
1908}
1909
1910static void tiqdio_thinint_handler(void *ind, void *drv_data)
1911{
1912 QDIO_DBF_TEXT4(0,trace,"thin_int");
1913
1914 qdio_perf_stat_inc(&perf_stats.thinints);
1915
1916 /* SVS only when needed:
1917 * issue SVS to benefit from iqdio interrupt avoidance
1918 * (SVS clears AISOI)*/
1919 if (!omit_svs)
1920 tiqdio_clear_global_summary();
1921
1922 tiqdio_inbound_checks();
1923}
1924
1925static void
1926qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1927{
1928 int i;
1929#ifdef CONFIG_QDIO_DEBUG
1930 char dbf_text[15];
1931
1932 QDIO_DBF_TEXT5(0,trace,"newstate");
1933 sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state);
1934 QDIO_DBF_TEXT5(0,trace,dbf_text);
1935#endif /* CONFIG_QDIO_DEBUG */
1936
1937 irq_ptr->state=state;
1938 for (i=0;i<irq_ptr->no_input_qs;i++)
1939 irq_ptr->input_qs[i]->state=state;
1940 for (i=0;i<irq_ptr->no_output_qs;i++)
1941 irq_ptr->output_qs[i]->state=state;
1942 mb();
1943}
1944
1945static void
1946qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1947{
1948 char dbf_text[15];
1949
1950 if (irb->esw.esw0.erw.cons) {
1951 sprintf(dbf_text,"sens%4x",schid.sch_no);
1952 QDIO_DBF_TEXT2(1,trace,dbf_text);
1953 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
1954
1955 QDIO_PRINT_WARN("sense data available on qdio channel.\n");
1956 QDIO_HEXDUMP16(WARN,"irb: ",irb);
1957 QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw);
1958 }
1959
1960}
1961
/*
 * a PCI (program-controlled interruption) arrived: trigger processing
 * of the input queues and, if the adapter raises outbound PCIs, of
 * the not-yet-finished output queues as well
 */
static void
qdio_handle_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	qdio_perf_stat_inc(&perf_stats.pcis);
	for (i=0;i<irq_ptr->no_input_qs;i++) {
		q=irq_ptr->input_qs[i];
		/* NOTE(review): is_input_q apparently doubles as a flag
		 * word here -- confirm against the queue setup code */
		if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
			qdio_mark_q(q);
		else {
			/* process directly in interrupt context */
			qdio_perf_stat_dec(&perf_stats.tl_runs);
			__qdio_inbound_processing(q);
		}
	}
	if (!irq_ptr->hydra_gives_outbound_pcis)
		return;
	for (i=0;i<irq_ptr->no_output_qs;i++) {
		q=irq_ptr->output_qs[i];
		if (qdio_is_outbound_q_done(q))
			continue;
		qdio_perf_stat_dec(&perf_stats.tl_runs);
		if (!irq_ptr->sync_done_on_outb_pcis)
			SYNC_MEMORY;
		__qdio_outbound_processing(q);
	}
}
1990
1991static void qdio_establish_handle_irq(struct ccw_device*, int, int);
1992
/*
 * a check condition arrived while the queues were active: report
 * QDIO_STATUS_ACTIVATE_CHECK_CONDITION through the first available
 * queue's handler and put the irq into STOPPED state
 */
static void
qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
			   int cstat, int dstat)
{
	struct qdio_irq *irq_ptr;
	struct qdio_q *q;
	char dbf_text[15];

	irq_ptr = cdev->private->qdio_data;

	QDIO_DBF_TEXT2(1, trace, "ick2");
	sprintf(dbf_text,"%s", cdev->dev.bus_id);
	QDIO_DBF_TEXT2(1,trace,dbf_text);
	QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int));
	QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
	QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
	QDIO_PRINT_ERR("received check condition on activate " \
		       "queues on device %s (cs=x%x, ds=x%x).\n",
		       cdev->dev.bus_id, cstat, dstat);
	/* any queue will do -- we only need its registered handler */
	if (irq_ptr->no_input_qs) {
		q=irq_ptr->input_qs[0];
	} else if (irq_ptr->no_output_qs) {
		q=irq_ptr->output_qs[0];
	} else {
		QDIO_PRINT_ERR("oops... no queue registered for device %s!?\n",
			       cdev->dev.bus_id);
		goto omit_handler_call;
	}
	q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
		   QDIO_STATUS_LOOK_FOR_ERROR,
		   0,0,0,-1,-1,q->int_parm);
omit_handler_call:
	qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED);

}
2028
2029static void
2030qdio_call_shutdown(struct work_struct *work)
2031{
2032 struct ccw_device_private *priv;
2033 struct ccw_device *cdev;
2034
2035 priv = container_of(work, struct ccw_device_private, kick_work);
2036 cdev = priv->cdev;
2037 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
2038 put_device(&cdev->dev);
2039}
2040
/*
 * an i/o request on cdev timed out; react according to the qdio
 * state the irq is in (runs in interrupt context)
 */
static void
qdio_timeout_handler(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	char dbf_text[15];

	QDIO_DBF_TEXT2(0, trace, "qtoh");
	sprintf(dbf_text, "%s", cdev->dev.bus_id);
	QDIO_DBF_TEXT2(0, trace, dbf_text);

	irq_ptr = cdev->private->qdio_data;
	sprintf(dbf_text, "state:%d", irq_ptr->state);
	QDIO_DBF_TEXT2(0, trace, dbf_text);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		/* establish never got its interrupt */
		QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n",
			       irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT2(1,setup,"eq:timeo");
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		QDIO_PRINT_INFO("Did not get interrupt on cleanup, "
				"irq=0.%x.%x.\n",
				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		/* I/O has been terminated by common I/O layer. */
		QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n",
				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT2(1, trace, "cio:term");
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
		if (get_device(&cdev->dev)) {
			/* Can't call shutdown from interrupt context. */
			PREPARE_WORK(&cdev->private->kick_work,
				     qdio_call_shutdown);
			queue_work(ccw_device_work, &cdev->private->kick_work);
		}
		break;
	default:
		BUG();
	}
	wake_up(&cdev->private->wait_q);
}
2087
/* interrupt handler for all qdio subchannels */
static void
qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct qdio_irq *irq_ptr;
	int cstat,dstat;
	char dbf_text[15];

#ifdef CONFIG_QDIO_DEBUG
	QDIO_DBF_TEXT4(0, trace, "qint");
	sprintf(dbf_text, "%s", cdev->dev.bus_id);
	QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (!intparm) {
		QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
			       "handler, device %s\n", cdev->dev.bus_id);
		return;
	}

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr) {
		QDIO_DBF_TEXT2(1, trace, "uint");
		sprintf(dbf_text,"%s", cdev->dev.bus_id);
		QDIO_DBF_TEXT2(1,trace,dbf_text);
		QDIO_PRINT_ERR("received interrupt on unused device %s!\n",
			       cdev->dev.bus_id);
		return;
	}

	if (IS_ERR(irb)) {
		/* Currently running i/o is in error. */
		switch (PTR_ERR(irb)) {
		case -EIO:
			QDIO_PRINT_ERR("i/o error on device %s\n",
				       cdev->dev.bus_id);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		case -ETIMEDOUT:
			qdio_timeout_handler(cdev);
			return;
		default:
			QDIO_PRINT_ERR("unknown error state %ld on device %s\n",
				       PTR_ERR(irb), cdev->dev.bus_id);
			return;
		}
	}

	qdio_irq_check_sense(irq_ptr->schid, irb);

#ifdef CONFIG_QDIO_DEBUG
	sprintf(dbf_text, "state:%d", irq_ptr->state);
	QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		/* interrupt belongs to qdio_establish */
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;

	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;

	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_handle_pci(irq_ptr);
			break;
		}

		if ((cstat&~SCHN_STAT_PCI)||dstat) {
			qdio_handle_activate_check(cdev, intparm, cstat, dstat);
			break;
		}
		/* fallthrough: neither PCI nor check condition is
		 * unexpected in these states -- report it below */
	default:
		QDIO_PRINT_ERR("got interrupt for queues in state %d on " \
			       "device %s?!\n",
			       irq_ptr->state, cdev->dev.bus_id);
	}
	wake_up(&cdev->private->wait_q);

}
2174
/*
 * issue a SIGA-sync for the given input or output queue (skipped
 * under QEBSM).  returns the instruction's condition code, 0 when no
 * sync was needed, or a negative errno on bad parameters.
 */
int
qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
		 unsigned int queue_number)
{
	int cc = 0;
	struct qdio_q *q;
	struct qdio_irq *irq_ptr;
	void *ptr;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15]="SyncXXXX";
#endif

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

#ifdef CONFIG_QDIO_DEBUG
	/* patch raw int values over the XXXX placeholders so they show
	 * up in the hex trace */
	*((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no;
	QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
	*((int*)(&dbf_text[0]))=flags;
	*((int*)(&dbf_text[4]))=queue_number;
	QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
#endif /* CONFIG_QDIO_DEBUG */

	if (flags&QDIO_FLAG_SYNC_INPUT) {
		q=irq_ptr->input_qs[queue_number];
		if (!q)
			return -EINVAL;
		if (!(irq_ptr->is_qebsm))
			cc = do_siga_sync(q->schid, 0, q->mask);
	} else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
		q=irq_ptr->output_qs[queue_number];
		if (!q)
			return -EINVAL;
		if (!(irq_ptr->is_qebsm))
			cc = do_siga_sync(q->schid, q->mask, 0);
	} else
		return -EINVAL;

	ptr=&cc;
	if (cc)
		QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int));

	return cc;
}
2220
2221static int
2222qdio_get_ssqd_information(struct subchannel_id *schid,
2223 struct qdio_chsc_ssqd **ssqd_area)
2224{
2225 int result;
2226
2227 QDIO_DBF_TEXT0(0, setup, "getssqd");
2228 *ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2229 if (!ssqd_area) {
2230 QDIO_PRINT_WARN("Could not get memory for chsc on sch x%x.\n",
2231 schid->sch_no);
2232 return -ENOMEM;
2233 }
2234
2235 (*ssqd_area)->request = (struct chsc_header) {
2236 .length = 0x0010,
2237 .code = 0x0024,
2238 };
2239 (*ssqd_area)->first_sch = schid->sch_no;
2240 (*ssqd_area)->last_sch = schid->sch_no;
2241 (*ssqd_area)->ssid = schid->ssid;
2242 result = chsc(*ssqd_area);
2243
2244 if (result) {
2245 QDIO_PRINT_WARN("CHSC returned cc %i on sch 0.%x.%x.\n",
2246 result, schid->ssid, schid->sch_no);
2247 goto out;
2248 }
2249
2250 if ((*ssqd_area)->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
2251 QDIO_PRINT_WARN("CHSC response is 0x%x on sch 0.%x.%x.\n",
2252 (*ssqd_area)->response.code,
2253 schid->ssid, schid->sch_no);
2254 goto out;
2255 }
2256 if (!((*ssqd_area)->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2257 !((*ssqd_area)->flags & CHSC_FLAG_VALIDITY) ||
2258 ((*ssqd_area)->sch != schid->sch_no)) {
2259 QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
2260 "using all SIGAs.\n",
2261 schid->ssid, schid->sch_no);
2262 goto out;
2263 }
2264 return 0;
2265out:
2266 return -EINVAL;
2267}
2268
2269int
2270qdio_get_ssqd_pct(struct ccw_device *cdev)
2271{
2272 struct qdio_chsc_ssqd *ssqd_area;
2273 struct subchannel_id schid;
2274 char dbf_text[15];
2275 int rc;
2276 int pct = 0;
2277
2278 QDIO_DBF_TEXT0(0, setup, "getpct");
2279 schid = ccw_device_get_subchannel_id(cdev);
2280 rc = qdio_get_ssqd_information(&schid, &ssqd_area);
2281 if (!rc)
2282 pct = (int)ssqd_area->pct;
2283 if (rc != -ENOMEM)
2284 mempool_free(ssqd_area, qdio_mempool_scssc);
2285 sprintf(dbf_text, "pct: %d", pct);
2286 QDIO_DBF_TEXT2(0, setup, dbf_text);
2287 return pct;
2288}
2289EXPORT_SYMBOL(qdio_get_ssqd_pct);
2290
2291static void
2292qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned long token)
2293{
2294 struct qdio_q *q;
2295 int i;
2296 unsigned int count, start_buf;
2297 char dbf_text[15];
2298
2299 /*check if QEBSM is disabled */
2300 if (!(irq_ptr->is_qebsm) || !(irq_ptr->qdioac & 0x01)) {
2301 irq_ptr->is_qebsm = 0;
2302 irq_ptr->sch_token = 0;
2303 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2304 QDIO_DBF_TEXT0(0,setup,"noV=V");
2305 return;
2306 }
2307 irq_ptr->sch_token = token;
2308 /*input queue*/
2309 for (i = 0; i < irq_ptr->no_input_qs;i++) {
2310 q = irq_ptr->input_qs[i];
2311 count = QDIO_MAX_BUFFERS_PER_Q;
2312 start_buf = 0;
2313 set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count);
2314 }
2315 sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm);
2316 QDIO_DBF_TEXT0(0,setup,dbf_text);
2317 sprintf(dbf_text,"%8lx",irq_ptr->sch_token);
2318 QDIO_DBF_TEXT0(0,setup,dbf_text);
2319 /*output queue*/
2320 for (i = 0; i < irq_ptr->no_output_qs; i++) {
2321 q = irq_ptr->output_qs[i];
2322 count = QDIO_MAX_BUFFERS_PER_Q;
2323 start_buf = 0;
2324 set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count);
2325 }
2326}
2327
2328static void
2329qdio_get_ssqd_siga(struct qdio_irq *irq_ptr)
2330{
2331 int rc;
2332 struct qdio_chsc_ssqd *ssqd_area;
2333
2334 QDIO_DBF_TEXT0(0,setup,"getssqd");
2335 irq_ptr->qdioac = 0;
2336 rc = qdio_get_ssqd_information(&irq_ptr->schid, &ssqd_area);
2337 if (rc) {
2338 QDIO_PRINT_WARN("using all SIGAs for sch x%x.n",
2339 irq_ptr->schid.sch_no);
2340 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2341 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2342 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2343 irq_ptr->is_qebsm = 0;
2344 } else
2345 irq_ptr->qdioac = ssqd_area->qdioac1;
2346
2347 qdio_check_subchannel_qebsm(irq_ptr, ssqd_area->sch_token);
2348 if (rc != -ENOMEM)
2349 mempool_free(ssqd_area, qdio_mempool_scssc);
2350}
2351
2352static unsigned int
2353tiqdio_check_chsc_availability(void)
2354{
2355 char dbf_text[15];
2356
2357 /* Check for bit 41. */
2358 if (!css_general_characteristics.aif) {
2359 QDIO_PRINT_WARN("Adapter interruption facility not " \
2360 "installed.\n");
2361 return -ENOENT;
2362 }
2363
2364 /* Check for bits 107 and 108. */
2365 if (!css_chsc_characteristics.scssc ||
2366 !css_chsc_characteristics.scsscf) {
2367 QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \
2368 "not available.\n");
2369 return -ENOENT;
2370 }
2371
2372 /* Check for OSA/FCP thin interrupts (bit 67). */
2373 hydra_thinints = css_general_characteristics.aif_osa;
2374 sprintf(dbf_text,"hydrati%1x", hydra_thinints);
2375 QDIO_DBF_TEXT0(0,setup,dbf_text);
2376
2377#ifdef CONFIG_64BIT
2378 /* Check for QEBSM support in general (bit 58). */
2379 is_passthrough = css_general_characteristics.qebsm;
2380#endif
2381 sprintf(dbf_text,"cssQBS:%1x", is_passthrough);
2382 QDIO_DBF_TEXT0(0,setup,dbf_text);
2383
2384 /* Check for aif time delay disablement fac (bit 56). If installed,
2385 * omit svs even under lpar (good point by rick again) */
2386 omit_svs = css_general_characteristics.aif_tdd;
2387 sprintf(dbf_text,"omitsvs%1x", omit_svs);
2388 QDIO_DBF_TEXT0(0,setup,dbf_text);
2389 return 0;
2390}
2391
2392
/*
 * Issue the "set subchannel indicator" CHSC command (code 0x0021) so the
 * hardware reports thin interrupts for this subchannel via the global
 * summary bit and the per-device state-change indicator.
 *
 * @reset_to_zero: if non-zero, both indicator addresses are cleared
 *                 (used on shutdown); otherwise they are set to the
 *                 physical addresses of tiqdio_ind and the device's
 *                 dev_st_chg_ind.
 *
 * Returns 0 on success, -ENODEV if this is not a thinint subchannel,
 * -ENOMEM on allocation failure, -EIO on CHSC failure.
 *
 * NOTE(review): return type is unsigned int while negative errnos are
 * returned; callers test for non-zero, so it works -- confirm before
 * changing the signature.
 */
static unsigned int
tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
{
	unsigned long real_addr_local_summary_bit;
	unsigned long real_addr_dev_st_chg_ind;
	void *ptr;
	char dbf_text[15];

	unsigned int resp_code;
	int result;

	/* layout of the CHSC command/response area; must match the
	 * hardware-defined format exactly, hence the reserved fields */
	struct {
		struct chsc_header request;
		u16 operation_code;
		u16 reserved1;
		u32 reserved2;
		u32 reserved3;
		u64 summary_indicator_addr;
		u64 subchannel_indicator_addr;
		u32 ks:4;
		u32 kc:4;
		u32 reserved4:21;
		u32 isc:3;
		u32 word_with_d_bit;
		/* set to 0x10000000 to enable
		 * time delay disablement facility */
		u32 reserved5;
		struct subchannel_id schid;
		u32 reserved6[1004];
		struct chsc_header response;
		u32 reserved7;
	} *scssc_area;

	if (!irq_ptr->is_thinint_irq)
		return -ENODEV;

	if (reset_to_zero) {
		real_addr_local_summary_bit=0;
		real_addr_dev_st_chg_ind=0;
	} else {
		real_addr_local_summary_bit=
			virt_to_phys((volatile void *)tiqdio_ind);
		real_addr_dev_st_chg_ind=
			virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
	}

	scssc_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
	if (!scssc_area) {
		QDIO_PRINT_WARN("No memory for setting indicators on " \
				"subchannel 0.%x.%x.\n",
				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
		return -ENOMEM;
	}
	scssc_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code = 0x0021,
	};
	scssc_area->operation_code = 0;

	scssc_area->summary_indicator_addr = real_addr_local_summary_bit;
	scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind;
	scssc_area->ks = QDIO_STORAGE_KEY;
	scssc_area->kc = QDIO_STORAGE_KEY;
	scssc_area->isc = TIQDIO_THININT_ISC;
	scssc_area->schid = irq_ptr->schid;
	/* enables the time delay disablement facility. Don't care
	 * whether it is really there (i.e. we haven't checked for
	 * it) */
	if (css_general_characteristics.aif_tdd)
		scssc_area->word_with_d_bit = 0x10000000;
	else
		QDIO_PRINT_WARN("Time delay disablement facility " \
				"not available\n");

	/* fire the command; non-zero means a condition code from chsc */
	result = chsc(scssc_area);
	if (result) {
		QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \
				"cc=%i.\n",
				irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result);
		result = -EIO;
		goto out;
	}

	/* chsc succeeded; now check the command's own response code */
	resp_code = scssc_area->response.code;
	if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
		QDIO_PRINT_WARN("response upon setting indicators " \
				"is 0x%x.\n",resp_code);
		sprintf(dbf_text,"sidR%4x",resp_code);
		QDIO_DBF_TEXT1(0,trace,dbf_text);
		QDIO_DBF_TEXT1(0,setup,dbf_text);
		ptr=&scssc_area->response;
		QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN);
		result = -EIO;
		goto out;
	}

	QDIO_DBF_TEXT2(0,setup,"setscind");
	QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit,
		      sizeof(unsigned long));
	QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
	result = 0;
out:
	mempool_free(scssc_area, qdio_mempool_scssc);
	return result;

}
2499
/*
 * Issue the "set CHSC subchannel characteristics fast" command (code
 * 0x1027) to program the thin-interrupt delay target for this
 * subchannel.
 *
 * Returns 0 on success or if the response code was bad (the delay
 * target is not critical, so we continue), -ENODEV for non-thinint
 * subchannels, -ENOMEM on allocation failure, -EIO if chsc itself
 * failed.
 *
 * NOTE(review): return type is unsigned int while negative errnos are
 * returned; callers appear to ignore the result -- confirm before
 * changing the signature.
 */
static unsigned int
tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
{
	unsigned int resp_code;
	int result;
	void *ptr;
	char dbf_text[15];

	/* hardware-defined CHSC command/response area layout */
	struct {
		struct chsc_header request;
		u16 operation_code;
		u16 reserved1;
		u32 reserved2;
		u32 reserved3;
		u32 reserved4[2];
		u32 delay_target;
		u32 reserved5[1009];
		struct chsc_header response;
		u32 reserved6;
	} *scsscf_area;

	if (!irq_ptr->is_thinint_irq)
		return -ENODEV;

	scsscf_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
	if (!scsscf_area) {
		QDIO_PRINT_WARN("No memory for setting delay target on " \
				"subchannel 0.%x.%x.\n",
				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
		return -ENOMEM;
	}
	scsscf_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code = 0x1027,
	};

	/* the target lives in the upper halfword of the field */
	scsscf_area->delay_target = delay_target<<16;

	result=chsc(scsscf_area);
	if (result) {
		QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \
				"cc=%i. Continuing.\n",
				irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
				result);
		result = -EIO;
		goto out;
	}

	/* a bad response code is only logged; the delay target is an
	 * optimization, not a prerequisite */
	resp_code = scsscf_area->response.code;
	if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
		QDIO_PRINT_WARN("response upon setting delay target " \
				"is 0x%x. Continuing.\n",resp_code);
		sprintf(dbf_text,"sdtR%4x",resp_code);
		QDIO_DBF_TEXT1(0,trace,dbf_text);
		QDIO_DBF_TEXT1(0,setup,dbf_text);
		ptr=&scsscf_area->response;
		QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN);
	}
	QDIO_DBF_TEXT2(0,trace,"delytrgt");
	QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
	result = 0; /* not critical */
out:
	mempool_free(scsscf_area, qdio_mempool_scssc);
	return result;
}
2565
2566int
2567qdio_cleanup(struct ccw_device *cdev, int how)
2568{
2569 struct qdio_irq *irq_ptr;
2570 char dbf_text[15];
2571 int rc;
2572
2573 irq_ptr = cdev->private->qdio_data;
2574 if (!irq_ptr)
2575 return -ENODEV;
2576
2577 sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no);
2578 QDIO_DBF_TEXT1(0,trace,dbf_text);
2579 QDIO_DBF_TEXT0(0,setup,dbf_text);
2580
2581 rc = qdio_shutdown(cdev, how);
2582 if ((rc == 0) || (rc == -EINPROGRESS))
2583 rc = qdio_free(cdev);
2584 return rc;
2585}
2586
2587int
2588qdio_shutdown(struct ccw_device *cdev, int how)
2589{
2590 struct qdio_irq *irq_ptr;
2591 int i;
2592 int result = 0;
2593 int rc;
2594 unsigned long flags;
2595 int timeout;
2596 char dbf_text[15];
2597
2598 irq_ptr = cdev->private->qdio_data;
2599 if (!irq_ptr)
2600 return -ENODEV;
2601
2602 down(&irq_ptr->setting_up_sema);
2603
2604 sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no);
2605 QDIO_DBF_TEXT1(0,trace,dbf_text);
2606 QDIO_DBF_TEXT0(0,setup,dbf_text);
2607
2608 /* mark all qs as uninteresting */
2609 for (i=0;i<irq_ptr->no_input_qs;i++)
2610 atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1);
2611
2612 for (i=0;i<irq_ptr->no_output_qs;i++)
2613 atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1);
2614
2615 tasklet_kill(&tiqdio_tasklet);
2616
2617 for (i=0;i<irq_ptr->no_input_qs;i++) {
2618 qdio_unmark_q(irq_ptr->input_qs[i]);
2619 tasklet_kill(&irq_ptr->input_qs[i]->tasklet);
2620 wait_event_interruptible_timeout(cdev->private->wait_q,
2621 !atomic_read(&irq_ptr->
2622 input_qs[i]->
2623 use_count),
2624 QDIO_NO_USE_COUNT_TIMEOUT);
2625 if (atomic_read(&irq_ptr->input_qs[i]->use_count))
2626 result=-EINPROGRESS;
2627 }
2628
2629 for (i=0;i<irq_ptr->no_output_qs;i++) {
2630 tasklet_kill(&irq_ptr->output_qs[i]->tasklet);
2631 del_timer(&irq_ptr->output_qs[i]->timer);
2632 wait_event_interruptible_timeout(cdev->private->wait_q,
2633 !atomic_read(&irq_ptr->
2634 output_qs[i]->
2635 use_count),
2636 QDIO_NO_USE_COUNT_TIMEOUT);
2637 if (atomic_read(&irq_ptr->output_qs[i]->use_count))
2638 result=-EINPROGRESS;
2639 }
2640
2641 /* cleanup subchannel */
2642 spin_lock_irqsave(get_ccwdev_lock(cdev),flags);
2643 if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) {
2644 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
2645 timeout=QDIO_CLEANUP_CLEAR_TIMEOUT;
2646 } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) {
2647 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2648 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2649 } else { /* default behaviour */
2650 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2651 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2652 }
2653 if (rc == -ENODEV) {
2654 /* No need to wait for device no longer present. */
2655 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2656 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2657 } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) {
2658 /*
2659 * Whoever put another handler there, has to cope with the
2660 * interrupt theirself. Might happen if qdio_shutdown was
2661 * called on already shutdown queues, but this shouldn't have
2662 * bad side effects.
2663 */
2664 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2665 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2666 } else if (rc == 0) {
2667 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
2668 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
2669
2670 wait_event_interruptible_timeout(cdev->private->wait_q,
2671 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
2672 irq_ptr->state == QDIO_IRQ_STATE_ERR,
2673 timeout);
2674 } else {
2675 QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
2676 "device %s\n", result, cdev->dev.bus_id);
2677 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2678 result = rc;
2679 goto out;
2680 }
2681 if (irq_ptr->is_thinint_irq) {
2682 qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind);
2683 tiqdio_set_subchannel_ind(irq_ptr,1);
2684 /* reset adapter interrupt indicators */
2685 }
2686
2687 /* exchange int handlers, if necessary */
2688 if ((void*)cdev->handler == (void*)qdio_handler)
2689 cdev->handler=irq_ptr->original_int_handler;
2690
2691 /* Ignore errors. */
2692 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2693out:
2694 up(&irq_ptr->setting_up_sema);
2695 return result;
2696}
2697
2698int
2699qdio_free(struct ccw_device *cdev)
2700{
2701 struct qdio_irq *irq_ptr;
2702 char dbf_text[15];
2703
2704 irq_ptr = cdev->private->qdio_data;
2705 if (!irq_ptr)
2706 return -ENODEV;
2707
2708 down(&irq_ptr->setting_up_sema);
2709
2710 sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no);
2711 QDIO_DBF_TEXT1(0,trace,dbf_text);
2712 QDIO_DBF_TEXT0(0,setup,dbf_text);
2713
2714 cdev->private->qdio_data = NULL;
2715
2716 up(&irq_ptr->setting_up_sema);
2717
2718 qdio_release_irq_memory(irq_ptr);
2719 module_put(THIS_MODULE);
2720 return 0;
2721}
2722
2723static void
2724qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2725{
2726 char dbf_text[20]; /* if a printf printed out more than 8 chars */
2727
2728 sprintf(dbf_text,"qfmt:%x",init_data->q_format);
2729 QDIO_DBF_TEXT0(0,setup,dbf_text);
2730 QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8);
2731 sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format);
2732 QDIO_DBF_TEXT0(0,setup,dbf_text);
2733 QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*));
2734 QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*));
2735 QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*));
2736 sprintf(dbf_text,"miit%4x",init_data->min_input_threshold);
2737 QDIO_DBF_TEXT0(0,setup,dbf_text);
2738 sprintf(dbf_text,"mait%4x",init_data->max_input_threshold);
2739 QDIO_DBF_TEXT0(0,setup,dbf_text);
2740 sprintf(dbf_text,"miot%4x",init_data->min_output_threshold);
2741 QDIO_DBF_TEXT0(0,setup,dbf_text);
2742 sprintf(dbf_text,"maot%4x",init_data->max_output_threshold);
2743 QDIO_DBF_TEXT0(0,setup,dbf_text);
2744 sprintf(dbf_text,"niq:%4x",init_data->no_input_qs);
2745 QDIO_DBF_TEXT0(0,setup,dbf_text);
2746 sprintf(dbf_text,"noq:%4x",init_data->no_output_qs);
2747 QDIO_DBF_TEXT0(0,setup,dbf_text);
2748 QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*));
2749 QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*));
2750 QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long));
2751 QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long));
2752 QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*));
2753 QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
2754}
2755
2756static void
2757qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2758{
2759 irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
2760 irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2761
2762 irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib);
2763
2764 irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl);
2765
2766 irq_ptr->qdr->qdf0[i].slsba=
2767 (unsigned long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]);
2768
2769 irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY;
2770 irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY;
2771 irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY;
2772 irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
2773}
2774
2775static void
2776qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2777 int j, int iqfmt)
2778{
2779 irq_ptr->output_qs[i]->is_iqdio_q = iqfmt;
2780 irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2781
2782 irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib);
2783
2784 irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl);
2785
2786 irq_ptr->qdr->qdf0[i+j].slsba=
2787 (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]);
2788
2789 irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY;
2790 irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY;
2791 irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY;
2792 irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY;
2793}
2794
2795
2796static void
2797qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2798{
2799 int i;
2800
2801 for (i=0;i<irq_ptr->no_input_qs;i++) {
2802 irq_ptr->input_qs[i]->siga_sync=
2803 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2804 irq_ptr->input_qs[i]->siga_in=
2805 irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2806 irq_ptr->input_qs[i]->siga_out=
2807 irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2808 irq_ptr->input_qs[i]->siga_sync_done_on_thinints=
2809 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2810 irq_ptr->input_qs[i]->hydra_gives_outbound_pcis=
2811 irq_ptr->hydra_gives_outbound_pcis;
2812 irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis=
2813 ((irq_ptr->qdioac&
2814 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2815 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2816 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2817 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2818
2819 }
2820}
2821
2822static void
2823qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2824{
2825 int i;
2826
2827 for (i=0;i<irq_ptr->no_output_qs;i++) {
2828 irq_ptr->output_qs[i]->siga_sync=
2829 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2830 irq_ptr->output_qs[i]->siga_in=
2831 irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2832 irq_ptr->output_qs[i]->siga_out=
2833 irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2834 irq_ptr->output_qs[i]->siga_sync_done_on_thinints=
2835 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2836 irq_ptr->output_qs[i]->hydra_gives_outbound_pcis=
2837 irq_ptr->hydra_gives_outbound_pcis;
2838 irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis=
2839 ((irq_ptr->qdioac&
2840 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2841 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2842 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2843 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2844
2845 }
2846}
2847
/*
 * Inspect the channel status (cstat) and device status (dstat) delivered
 * with the establish-queues interrupt.  Returns non-zero (and moves the
 * irq to QDIO_IRQ_STATE_ERR) when the establish must be treated as
 * failed, 0 when the status looks good.
 *
 * NOTE(review): the first branch logs, sets the ERR state and then falls
 * through without returning; the third branch repeats the
 * dstat & ~(CHN_END|DEV_END) part of the first condition and does
 * return 1 -- the duplication looks intentional (different log targets)
 * but confirm before simplifying.
 */
static int
qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
				    int dstat)
{
	char dbf_text[15];
	struct qdio_irq *irq_ptr;

	irq_ptr = cdev->private->qdio_data;

	/* any channel check, or a device status beyond chn-end/dev-end */
	if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
		sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT2(1,trace,dbf_text);
		QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
		QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
		QDIO_PRINT_ERR("received check condition on establish " \
			       "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n",
			       irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
			       cstat,dstat);
		qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
	}

	/* device end is required for a completed establish */
	if (!(dstat & DEV_STAT_DEV_END)) {
		QDIO_DBF_TEXT2(1,setup,"eq:no de");
		QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
		QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
		QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get "
			       "device end: dstat=%02x, cstat=%02x\n",
			       irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
			       dstat, cstat);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		return 1;
	}

	/* unexpected extra device status bits */
	if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) {
		QDIO_DBF_TEXT2(1,setup,"eq:badio");
		QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
		QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
		QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got "
			       "the following devstat: dstat=%02x, "
			       "cstat=%02x\n", irq_ptr->schid.ssid,
			       irq_ptr->schid.sch_no, dstat, cstat);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		return 1;
	}
	return 0;
}
2894
2895static void
2896qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
2897{
2898 struct qdio_irq *irq_ptr;
2899 char dbf_text[15];
2900
2901 irq_ptr = cdev->private->qdio_data;
2902
2903 sprintf(dbf_text,"qehi%4x",cdev->private->schid.sch_no);
2904 QDIO_DBF_TEXT0(0,setup,dbf_text);
2905 QDIO_DBF_TEXT0(0,trace,dbf_text);
2906
2907 if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat))
2908 return;
2909
2910 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
2911}
2912
2913int
2914qdio_initialize(struct qdio_initialize *init_data)
2915{
2916 int rc;
2917 char dbf_text[15];
2918
2919 sprintf(dbf_text,"qini%4x",init_data->cdev->private->schid.sch_no);
2920 QDIO_DBF_TEXT0(0,setup,dbf_text);
2921 QDIO_DBF_TEXT0(0,trace,dbf_text);
2922
2923 rc = qdio_allocate(init_data);
2924 if (rc == 0) {
2925 rc = qdio_establish(init_data);
2926 if (rc != 0)
2927 qdio_free(init_data->cdev);
2928 }
2929
2930 return rc;
2931}
2932
2933
2934int
2935qdio_allocate(struct qdio_initialize *init_data)
2936{
2937 struct qdio_irq *irq_ptr;
2938 char dbf_text[15];
2939
2940 sprintf(dbf_text,"qalc%4x",init_data->cdev->private->schid.sch_no);
2941 QDIO_DBF_TEXT0(0,setup,dbf_text);
2942 QDIO_DBF_TEXT0(0,trace,dbf_text);
2943 if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2944 (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2945 ((init_data->no_input_qs) && (!init_data->input_handler)) ||
2946 ((init_data->no_output_qs) && (!init_data->output_handler)) )
2947 return -EINVAL;
2948
2949 if (!init_data->input_sbal_addr_array)
2950 return -EINVAL;
2951
2952 if (!init_data->output_sbal_addr_array)
2953 return -EINVAL;
2954
2955 qdio_allocate_do_dbf(init_data);
2956
2957 /* create irq */
2958 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2959
2960 QDIO_DBF_TEXT0(0,setup,"irq_ptr:");
2961 QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
2962
2963 if (!irq_ptr) {
2964 QDIO_PRINT_ERR("allocation of irq_ptr failed!\n");
2965 return -ENOMEM;
2966 }
2967
2968 init_MUTEX(&irq_ptr->setting_up_sema);
2969
2970 /* QDR must be in DMA area since CCW data address is only 32 bit */
2971 irq_ptr->qdr = (struct qdr *) __get_free_page(GFP_KERNEL | GFP_DMA);
2972 if (!(irq_ptr->qdr)) {
2973 free_page((unsigned long) irq_ptr);
2974 QDIO_PRINT_ERR("allocation of irq_ptr->qdr failed!\n");
2975 return -ENOMEM;
2976 }
2977 QDIO_DBF_TEXT0(0,setup,"qdr:");
2978 QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*));
2979
2980 if (qdio_alloc_qs(irq_ptr,
2981 init_data->no_input_qs,
2982 init_data->no_output_qs)) {
2983 QDIO_PRINT_ERR("queue allocation failed!\n");
2984 qdio_release_irq_memory(irq_ptr);
2985 return -ENOMEM;
2986 }
2987
2988 init_data->cdev->private->qdio_data = irq_ptr;
2989
2990 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE);
2991
2992 return 0;
2993}
2994
/*
 * Fill the (already allocated) irq descriptor, QDR and QIB from the
 * caller's qdio_initialize request and install qdio_handler as the
 * device's interrupt handler.
 *
 * Returns 0 on success, -ENOBUFS if no thinint indicator is available,
 * -EINVAL if the module reference could not be taken.  On failure the
 * irq memory is released again.
 */
static int qdio_fill_irq(struct qdio_initialize *init_data)
{
	int i;
	char dbf_text[15];
	struct ciw *ciw;
	int is_iqdio;
	struct qdio_irq *irq_ptr;

	irq_ptr = init_data->cdev->private->qdio_data;

	/* clear everything up to (but not including) the qdr pointer,
	 * which must survive since the QDR page is already allocated */
	memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr));

	/* wipes qib.ac, required by ar7063 */
	memset(irq_ptr->qdr,0,sizeof(struct qdr));

	irq_ptr->int_parm=init_data->int_parm;

	irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
	irq_ptr->no_input_qs=init_data->no_input_qs;
	irq_ptr->no_output_qs=init_data->no_output_qs;

	/* IQDIO format always implies thin interrupts; otherwise use
	 * them only if the hardware offers them (hydra_thinints) */
	if (init_data->q_format==QDIO_IQDIO_QFMT) {
		irq_ptr->is_iqdio_irq=1;
		irq_ptr->is_thinint_irq=1;
	} else {
		irq_ptr->is_iqdio_irq=0;
		irq_ptr->is_thinint_irq=hydra_thinints;
	}
	sprintf(dbf_text,"is_i_t%1x%1x",
		irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq);
	QDIO_DBF_TEXT2(0,setup,dbf_text);

	/* thinint devices need a device state change indicator */
	if (irq_ptr->is_thinint_irq) {
		irq_ptr->dev_st_chg_ind = qdio_get_indicator();
		QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
		if (!irq_ptr->dev_st_chg_ind) {
			QDIO_PRINT_WARN("no indicator location available " \
					"for irq 0.%x.%x\n",
					irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
			qdio_release_irq_memory(irq_ptr);
			return -ENOBUFS;
		}
	}

	/* defaults */
	irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD;
	irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT;
	irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD;
	irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT;

	qdio_fill_qs(irq_ptr, init_data->cdev,
		     init_data->no_input_qs,
		     init_data->no_output_qs,
		     init_data->input_handler,
		     init_data->output_handler,init_data->int_parm,
		     init_data->q_format,init_data->flags,
		     init_data->input_sbal_addr_array,
		     init_data->output_sbal_addr_array);

	if (!try_module_get(THIS_MODULE)) {
		QDIO_PRINT_CRIT("try_module_get() failed!\n");
		qdio_release_irq_memory(irq_ptr);
		return -EINVAL;
	}

	qdio_fill_thresholds(irq_ptr,init_data->no_input_qs,
			     init_data->no_output_qs,
			     init_data->min_input_threshold,
			     init_data->max_input_threshold,
			     init_data->min_output_threshold,
			     init_data->max_output_threshold);

	/* fill in qdr */
	irq_ptr->qdr->qfmt=init_data->q_format;
	irq_ptr->qdr->iqdcnt=init_data->no_input_qs;
	irq_ptr->qdr->oqdcnt=init_data->no_output_qs;
	irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */
	irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4;

	irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib;
	irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;

	/* fill in qib */
	irq_ptr->is_qebsm = is_passthrough;
	if (irq_ptr->is_qebsm)
		irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;

	irq_ptr->qib.qfmt=init_data->q_format;
	if (init_data->no_input_qs)
		irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
	if (init_data->no_output_qs)
		irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib);
	memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8);

	qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format,
			     init_data->qib_param_field,
			     init_data->no_input_qs,
			     init_data->no_output_qs,
			     init_data->input_slib_elements,
			     init_data->output_slib_elements);

	/* first input descriptors, then output descriptors */
	is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0;
	for (i=0;i<init_data->no_input_qs;i++)
		qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio);

	for (i=0;i<init_data->no_output_qs;i++)
		qdio_allocate_fill_output_desc(irq_ptr, i,
					       init_data->no_input_qs,
					       is_iqdio);

	/* qdr, qib, sls, slsbs, slibs, sbales filled. */

	/* get qdio commands */
	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
	if (!ciw) {
		QDIO_DBF_TEXT2(1,setup,"no eq");
		QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. "
				"Trying to use default.\n");
	} else
		irq_ptr->equeue = *ciw;
	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
	if (!ciw) {
		QDIO_DBF_TEXT2(1,setup,"no aq");
		QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. "
				"Trying to use default.\n");
	} else
		irq_ptr->aqueue = *ciw;

	/* Set new interrupt handler. */
	irq_ptr->original_int_handler = init_data->cdev->handler;
	init_data->cdev->handler = qdio_handler;

	return 0;
}
3130
3131int
3132qdio_establish(struct qdio_initialize *init_data)
3133{
3134 struct qdio_irq *irq_ptr;
3135 unsigned long saveflags;
3136 int result, result2;
3137 struct ccw_device *cdev;
3138 char dbf_text[20];
3139
3140 cdev=init_data->cdev;
3141 irq_ptr = cdev->private->qdio_data;
3142 if (!irq_ptr)
3143 return -EINVAL;
3144
3145 if (cdev->private->state != DEV_STATE_ONLINE)
3146 return -EINVAL;
3147
3148 down(&irq_ptr->setting_up_sema);
3149
3150 qdio_fill_irq(init_data);
3151
3152 /* the thinint CHSC stuff */
3153 if (irq_ptr->is_thinint_irq) {
3154
3155 result = tiqdio_set_subchannel_ind(irq_ptr,0);
3156 if (result) {
3157 up(&irq_ptr->setting_up_sema);
3158 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3159 return result;
3160 }
3161 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
3162 }
3163
3164 sprintf(dbf_text,"qest%4x",cdev->private->schid.sch_no);
3165 QDIO_DBF_TEXT0(0,setup,dbf_text);
3166 QDIO_DBF_TEXT0(0,trace,dbf_text);
3167
3168 /* establish q */
3169 irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd;
3170 irq_ptr->ccw.flags=CCW_FLAG_SLI;
3171 irq_ptr->ccw.count=irq_ptr->equeue.count;
3172 irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr);
3173
3174 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3175
3176 ccw_device_set_options_mask(cdev, 0);
3177 result = ccw_device_start(cdev, &irq_ptr->ccw,
3178 QDIO_DOING_ESTABLISH, 0, 0);
3179 if (result) {
3180 result2 = ccw_device_start(cdev, &irq_ptr->ccw,
3181 QDIO_DOING_ESTABLISH, 0, 0);
3182 sprintf(dbf_text,"eq:io%4x",result);
3183 QDIO_DBF_TEXT2(1,setup,dbf_text);
3184 if (result2) {
3185 sprintf(dbf_text,"eq:io%4x",result);
3186 QDIO_DBF_TEXT2(1,setup,dbf_text);
3187 }
3188 QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \
3189 "returned %i, next try returned %i\n",
3190 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3191 result, result2);
3192 result=result2;
3193 }
3194
3195 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
3196
3197 if (result) {
3198 up(&irq_ptr->setting_up_sema);
3199 qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR);
3200 return result;
3201 }
3202
3203 wait_event_interruptible_timeout(cdev->private->wait_q,
3204 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
3205 irq_ptr->state == QDIO_IRQ_STATE_ERR,
3206 QDIO_ESTABLISH_TIMEOUT);
3207
3208 if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED)
3209 result = 0;
3210 else {
3211 up(&irq_ptr->setting_up_sema);
3212 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3213 return -EIO;
3214 }
3215
3216 qdio_get_ssqd_siga(irq_ptr);
3217 /* if this gets set once, we're running under VM and can omit SVSes */
3218 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
3219 omit_svs=1;
3220
3221 sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac);
3222 QDIO_DBF_TEXT2(0,setup,dbf_text);
3223
3224 sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac);
3225 QDIO_DBF_TEXT2(0,setup,dbf_text);
3226
3227 irq_ptr->hydra_gives_outbound_pcis=
3228 irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED;
3229 irq_ptr->sync_done_on_outb_pcis=
3230 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS;
3231
3232 qdio_initialize_set_siga_flags_input(irq_ptr);
3233 qdio_initialize_set_siga_flags_output(irq_ptr);
3234
3235 up(&irq_ptr->setting_up_sema);
3236
3237 return result;
3238
3239}
3240
3241int
3242qdio_activate(struct ccw_device *cdev, int flags)
3243{
3244 struct qdio_irq *irq_ptr;
3245 int i,result=0,result2;
3246 unsigned long saveflags;
3247 char dbf_text[20]; /* see qdio_initialize */
3248
3249 irq_ptr = cdev->private->qdio_data;
3250 if (!irq_ptr)
3251 return -ENODEV;
3252
3253 if (cdev->private->state != DEV_STATE_ONLINE)
3254 return -EINVAL;
3255
3256 down(&irq_ptr->setting_up_sema);
3257 if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) {
3258 result=-EBUSY;
3259 goto out;
3260 }
3261
3262 sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no);
3263 QDIO_DBF_TEXT2(0,setup,dbf_text);
3264 QDIO_DBF_TEXT2(0,trace,dbf_text);
3265
3266 /* activate q */
3267 irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd;
3268 irq_ptr->ccw.flags=CCW_FLAG_SLI;
3269 irq_ptr->ccw.count=irq_ptr->aqueue.count;
3270 irq_ptr->ccw.cda=QDIO_GET_ADDR(0);
3271
3272 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3273
3274 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
3275 result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
3276 0, DOIO_DENY_PREFETCH);
3277 if (result) {
3278 result2=ccw_device_start(cdev,&irq_ptr->ccw,
3279 QDIO_DOING_ACTIVATE,0,0);
3280 sprintf(dbf_text,"aq:io%4x",result);
3281 QDIO_DBF_TEXT2(1,setup,dbf_text);
3282 if (result2) {
3283 sprintf(dbf_text,"aq:io%4x",result);
3284 QDIO_DBF_TEXT2(1,setup,dbf_text);
3285 }
3286 QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \
3287 "returned %i, next try returned %i\n",
3288 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3289 result, result2);
3290 result=result2;
3291 }
3292
3293 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
3294 if (result)
3295 goto out;
3296
3297 for (i=0;i<irq_ptr->no_input_qs;i++) {
3298 if (irq_ptr->is_thinint_irq) {
3299 /*
3300 * that way we know, that, if we will get interrupted
3301 * by tiqdio_inbound_processing, qdio_unmark_q will
3302 * not be called
3303 */
3304 qdio_reserve_q(irq_ptr->input_qs[i]);
3305 qdio_mark_tiq(irq_ptr->input_qs[i]);
3306 qdio_release_q(irq_ptr->input_qs[i]);
3307 }
3308 }
3309
3310 if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) {
3311 for (i=0;i<irq_ptr->no_input_qs;i++) {
3312 irq_ptr->input_qs[i]->is_input_q|=
3313 QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT;
3314 }
3315 }
3316
3317 msleep(QDIO_ACTIVATE_TIMEOUT);
3318 switch (irq_ptr->state) {
3319 case QDIO_IRQ_STATE_STOPPED:
3320 case QDIO_IRQ_STATE_ERR:
3321 up(&irq_ptr->setting_up_sema);
3322 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3323 down(&irq_ptr->setting_up_sema);
3324 result = -EIO;
3325 break;
3326 default:
3327 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
3328 result = 0;
3329 }
3330 out:
3331 up(&irq_ptr->setting_up_sema);
3332
3333 return result;
3334}
3335
3336/* buffers filled forwards again to make Rick happy */
3337static void
3338qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3339 unsigned int count, struct qdio_buffer *buffers)
3340{
3341 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3342 int tmp = 0;
3343
3344 qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3345 if (irq->is_qebsm) {
3346 while (count) {
3347 tmp = set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3348 if (!tmp)
3349 return;
3350 }
3351 return;
3352 }
3353 for (;;) {
3354 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3355 count--;
3356 if (!count) break;
3357 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3358 }
3359}
3360
3361static void
3362qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3363 unsigned int count, struct qdio_buffer *buffers)
3364{
3365 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3366 int tmp = 0;
3367
3368 qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3369 if (irq->is_qebsm) {
3370 while (count) {
3371 tmp = set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3372 if (!tmp)
3373 return;
3374 }
3375 return;
3376 }
3377
3378 for (;;) {
3379 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3380 count--;
3381 if (!count) break;
3382 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3383 }
3384}
3385
3386static void
3387do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3388 unsigned int qidx, unsigned int count,
3389 struct qdio_buffer *buffers)
3390{
3391 int used_elements;
3392
3393 /* This is the inbound handling of queues */
3394 used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3395
3396 qdio_do_qdio_fill_input(q,qidx,count,buffers);
3397
3398 if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
3399 (callflags&QDIO_FLAG_UNDER_INTERRUPT))
3400 atomic_xchg(&q->polling,0);
3401
3402 if (used_elements)
3403 return;
3404 if (callflags&QDIO_FLAG_DONT_SIGA)
3405 return;
3406 if (q->siga_in) {
3407 int result;
3408
3409 result=qdio_siga_input(q);
3410 if (result) {
3411 if (q->siga_error)
3412 q->error_status_flags|=
3413 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
3414 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
3415 q->siga_error=result;
3416 }
3417 }
3418
3419 qdio_mark_q(q);
3420}
3421
/*
 * Outbound part of do_QDIO: prime @count buffers starting at @qidx for
 * the adapter and trigger transmission via SIGA / outbound processing.
 *
 * NOTE(review): the "SYNC_MEMORY; else { ... }" construct below only
 * compiles because SYNC_MEMORY is a macro expanding to an if-statement
 * without an else branch (see the "else branch of SYNC_MEMORY" comment
 * inside) -- do not reformat or restructure this without checking the
 * macro definition first.
 */
static void
do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
			unsigned int qidx, unsigned int count,
			struct qdio_buffer *buffers)
{
	int used_elements;
	unsigned int cnt, start_buf;
	unsigned char state = 0;
	struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;

	/* This is the outbound handling of queues */
	qdio_do_qdio_fill_output(q,qidx,count,buffers);

	/* computed for symmetry with the inbound path; the value itself
	 * is not read here, only the atomic add matters */
	used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;

	if (callflags&QDIO_FLAG_DONT_SIGA) {
		qdio_perf_stat_inc(&perf_stats.outbound_cnt);
		return;
	}
	if (callflags & QDIO_FLAG_PCI_OUT)
		q->is_pci_out = 1;
	else
		q->is_pci_out = 0;
	if (q->is_iqdio_q) {
		/* one siga for every sbal */
		while (count--)
			qdio_kick_outbound_q(q);

		__qdio_outbound_processing(q);
	} else {
		/* under VM, we do a SIGA sync unconditionally */
		SYNC_MEMORY;
		else {
			/*
			 * w/o shadow queues (else branch of
			 * SYNC_MEMORY :-/ ), we try to
			 * fast-requeue buffers
			 */
			/* look at the state of the buffer just before the
			 * newly primed range to decide whether a SIGA is
			 * needed or the adapter is still fetching */
			if (irq->is_qebsm) {
				cnt = 1;
				start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) &
					     (QDIO_MAX_BUFFERS_PER_Q-1));
				qdio_do_eqbs(q, &state, &start_buf, &cnt);
			} else
				state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
					&(QDIO_MAX_BUFFERS_PER_Q-1) ];
			if (state != SLSB_CU_OUTPUT_PRIMED) {
				qdio_kick_outbound_q(q);
			} else {
				QDIO_DBF_TEXT3(0,trace, "fast-req");
				qdio_perf_stat_inc(&perf_stats.fast_reqs);
			}
		}
		/*
		 * only marking the q could take too long,
		 * the upper layer module could do a lot of
		 * traffic in that time
		 */
		__qdio_outbound_processing(q);
	}

	qdio_perf_stat_inc(&perf_stats.outbound_cnt);
}
3485
3486/* count must be 1 in iqdio */
3487int
3488do_QDIO(struct ccw_device *cdev,unsigned int callflags,
3489 unsigned int queue_number, unsigned int qidx,
3490 unsigned int count,struct qdio_buffer *buffers)
3491{
3492 struct qdio_irq *irq_ptr;
3493#ifdef CONFIG_QDIO_DEBUG
3494 char dbf_text[20];
3495
3496 sprintf(dbf_text,"doQD%04x",cdev->private->schid.sch_no);
3497 QDIO_DBF_TEXT3(0,trace,dbf_text);
3498#endif /* CONFIG_QDIO_DEBUG */
3499
3500 if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) ||
3501 (count>QDIO_MAX_BUFFERS_PER_Q) ||
3502 (queue_number>QDIO_MAX_QUEUES_PER_IRQ) )
3503 return -EINVAL;
3504
3505 if (count==0)
3506 return 0;
3507
3508 irq_ptr = cdev->private->qdio_data;
3509 if (!irq_ptr)
3510 return -ENODEV;
3511
3512#ifdef CONFIG_QDIO_DEBUG
3513 if (callflags&QDIO_FLAG_SYNC_INPUT)
3514 QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
3515 sizeof(void*));
3516 else
3517 QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number],
3518 sizeof(void*));
3519 sprintf(dbf_text,"flag%04x",callflags);
3520 QDIO_DBF_TEXT3(0,trace,dbf_text);
3521 sprintf(dbf_text,"qi%02xct%02x",qidx,count);
3522 QDIO_DBF_TEXT3(0,trace,dbf_text);
3523#endif /* CONFIG_QDIO_DEBUG */
3524
3525 if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
3526 return -EBUSY;
3527
3528 if (callflags&QDIO_FLAG_SYNC_INPUT)
3529 do_qdio_handle_inbound(irq_ptr->input_qs[queue_number],
3530 callflags, qidx, count, buffers);
3531 else if (callflags&QDIO_FLAG_SYNC_OUTPUT)
3532 do_qdio_handle_outbound(irq_ptr->output_qs[queue_number],
3533 callflags, qidx, count, buffers);
3534 else {
3535 QDIO_DBF_TEXT3(1,trace,"doQD:inv");
3536 return -EINVAL;
3537 }
3538 return 0;
3539}
3540
/*
 * Read handler for the /proc/qdio_perf file: dump all performance
 * counters as plain text. The whole report fits into the first read
 * (offset 0); any follow-up read returns 0 (EOF). The 64-bit and 32-bit
 * branches print the same counters, only the atomic type differs.
 */
static int
qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
			int buffer_length, int *eof, void *data)
{
	int c=0;

	/* we are always called with buffer_length=4k, so we always
	   deliver on the first read */
	if (offset>0)
		return 0;

/* append a formatted line to the output buffer, advancing c */
#define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
#ifdef CONFIG_64BIT
	_OUTP_IT("Number of tasklet runs (total)                  : %li\n",
		 (long)atomic64_read(&perf_stats.tl_runs));
	_OUTP_IT("Inbound tasklet runs      tried/retried         : %li/%li\n",
		 (long)atomic64_read(&perf_stats.inbound_tl_runs),
		 (long)atomic64_read(&perf_stats.inbound_tl_runs_resched));
	_OUTP_IT("Inbound-thin tasklet runs tried/retried         : %li/%li\n",
		 (long)atomic64_read(&perf_stats.inbound_thin_tl_runs),
		 (long)atomic64_read(&perf_stats.inbound_thin_tl_runs_resched));
	_OUTP_IT("Outbound tasklet runs     tried/retried         : %li/%li\n",
		 (long)atomic64_read(&perf_stats.outbound_tl_runs),
		 (long)atomic64_read(&perf_stats.outbound_tl_runs_resched));
	_OUTP_IT("\n");
	_OUTP_IT("Number of SIGA sync's issued                    : %li\n",
		 (long)atomic64_read(&perf_stats.siga_syncs));
	_OUTP_IT("Number of SIGA in's issued                      : %li\n",
		 (long)atomic64_read(&perf_stats.siga_ins));
	_OUTP_IT("Number of SIGA out's issued                     : %li\n",
		 (long)atomic64_read(&perf_stats.siga_outs));
	_OUTP_IT("Number of PCIs caught                           : %li\n",
		 (long)atomic64_read(&perf_stats.pcis));
	_OUTP_IT("Number of adapter interrupts caught             : %li\n",
		 (long)atomic64_read(&perf_stats.thinints));
	_OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA)  : %li\n",
		 (long)atomic64_read(&perf_stats.fast_reqs));
	_OUTP_IT("\n");
	_OUTP_IT("Number of inbound transfers                     : %li\n",
		 (long)atomic64_read(&perf_stats.inbound_cnt));
	_OUTP_IT("Number of do_QDIOs outbound                     : %li\n",
		 (long)atomic64_read(&perf_stats.outbound_cnt));
#else /* CONFIG_64BIT */
	_OUTP_IT("Number of tasklet runs (total)                  : %i\n",
		 atomic_read(&perf_stats.tl_runs));
	_OUTP_IT("Inbound tasklet runs      tried/retried         : %i/%i\n",
		 atomic_read(&perf_stats.inbound_tl_runs),
		 atomic_read(&perf_stats.inbound_tl_runs_resched));
	_OUTP_IT("Inbound-thin tasklet runs tried/retried         : %i/%i\n",
		 atomic_read(&perf_stats.inbound_thin_tl_runs),
		 atomic_read(&perf_stats.inbound_thin_tl_runs_resched));
	_OUTP_IT("Outbound tasklet runs     tried/retried         : %i/%i\n",
		 atomic_read(&perf_stats.outbound_tl_runs),
		 atomic_read(&perf_stats.outbound_tl_runs_resched));
	_OUTP_IT("\n");
	_OUTP_IT("Number of SIGA sync's issued                    : %i\n",
		 atomic_read(&perf_stats.siga_syncs));
	_OUTP_IT("Number of SIGA in's issued                      : %i\n",
		 atomic_read(&perf_stats.siga_ins));
	_OUTP_IT("Number of SIGA out's issued                     : %i\n",
		 atomic_read(&perf_stats.siga_outs));
	_OUTP_IT("Number of PCIs caught                           : %i\n",
		 atomic_read(&perf_stats.pcis));
	_OUTP_IT("Number of adapter interrupts caught             : %i\n",
		 atomic_read(&perf_stats.thinints));
	_OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA)  : %i\n",
		 atomic_read(&perf_stats.fast_reqs));
	_OUTP_IT("\n");
	_OUTP_IT("Number of inbound transfers                     : %i\n",
		 atomic_read(&perf_stats.inbound_cnt));
	_OUTP_IT("Number of do_QDIOs outbound                     : %i\n",
		 atomic_read(&perf_stats.outbound_cnt));
#endif /* CONFIG_64BIT */
	_OUTP_IT("\n");

	return c;
}
3618
3619static struct proc_dir_entry *qdio_perf_proc_file;
3620
3621static void
3622qdio_add_procfs_entry(void)
3623{
3624 proc_perf_file_registration=0;
3625 qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
3626 S_IFREG|0444,NULL);
3627 if (qdio_perf_proc_file) {
3628 qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read;
3629 } else proc_perf_file_registration=-1;
3630
3631 if (proc_perf_file_registration)
3632 QDIO_PRINT_WARN("was not able to register perf. " \
3633 "proc-file (%i).\n",
3634 proc_perf_file_registration);
3635}
3636
3637static void
3638qdio_remove_procfs_entry(void)
3639{
3640 if (!proc_perf_file_registration) /* means if it went ok earlier */
3641 remove_proc_entry(QDIO_PERF,NULL);
3642}
3643
3644/**
3645 * attributes in sysfs
3646 *****************************************************************************/
3647
3648static ssize_t
3649qdio_performance_stats_show(struct bus_type *bus, char *buf)
3650{
3651 return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
3652}
3653
3654static ssize_t
3655qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count)
3656{
3657 unsigned long i;
3658 int ret;
3659
3660 ret = strict_strtoul(buf, 16, &i);
3661 if (!ret && ((i == 0) || (i == 1))) {
3662 if (i == qdio_performance_stats)
3663 return count;
3664 qdio_performance_stats = i;
3665 if (i==0) {
3666 /* reset perf. stat. info */
3667#ifdef CONFIG_64BIT
3668 atomic64_set(&perf_stats.tl_runs, 0);
3669 atomic64_set(&perf_stats.outbound_tl_runs, 0);
3670 atomic64_set(&perf_stats.inbound_tl_runs, 0);
3671 atomic64_set(&perf_stats.inbound_tl_runs_resched, 0);
3672 atomic64_set(&perf_stats.inbound_thin_tl_runs, 0);
3673 atomic64_set(&perf_stats.inbound_thin_tl_runs_resched,
3674 0);
3675 atomic64_set(&perf_stats.siga_outs, 0);
3676 atomic64_set(&perf_stats.siga_ins, 0);
3677 atomic64_set(&perf_stats.siga_syncs, 0);
3678 atomic64_set(&perf_stats.pcis, 0);
3679 atomic64_set(&perf_stats.thinints, 0);
3680 atomic64_set(&perf_stats.fast_reqs, 0);
3681 atomic64_set(&perf_stats.outbound_cnt, 0);
3682 atomic64_set(&perf_stats.inbound_cnt, 0);
3683#else /* CONFIG_64BIT */
3684 atomic_set(&perf_stats.tl_runs, 0);
3685 atomic_set(&perf_stats.outbound_tl_runs, 0);
3686 atomic_set(&perf_stats.inbound_tl_runs, 0);
3687 atomic_set(&perf_stats.inbound_tl_runs_resched, 0);
3688 atomic_set(&perf_stats.inbound_thin_tl_runs, 0);
3689 atomic_set(&perf_stats.inbound_thin_tl_runs_resched, 0);
3690 atomic_set(&perf_stats.siga_outs, 0);
3691 atomic_set(&perf_stats.siga_ins, 0);
3692 atomic_set(&perf_stats.siga_syncs, 0);
3693 atomic_set(&perf_stats.pcis, 0);
3694 atomic_set(&perf_stats.thinints, 0);
3695 atomic_set(&perf_stats.fast_reqs, 0);
3696 atomic_set(&perf_stats.outbound_cnt, 0);
3697 atomic_set(&perf_stats.inbound_cnt, 0);
3698#endif /* CONFIG_64BIT */
3699 }
3700 } else {
3701 QDIO_PRINT_ERR("QDIO performance_stats: write 0 or 1 to this file!\n");
3702 return -EINVAL;
3703 }
3704 return count;
3705}
3706
3707static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show,
3708 qdio_performance_stats_store);
3709
3710static void
3711tiqdio_register_thinints(void)
3712{
3713 char dbf_text[20];
3714
3715 tiqdio_ind =
3716 s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL,
3717 TIQDIO_THININT_ISC);
3718 if (IS_ERR(tiqdio_ind)) {
3719 sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind));
3720 QDIO_DBF_TEXT0(0,setup,dbf_text);
3721 QDIO_PRINT_ERR("failed to register adapter handler " \
3722 "(rc=%li).\nAdapter interrupts might " \
3723 "not work. Continuing.\n",
3724 PTR_ERR(tiqdio_ind));
3725 tiqdio_ind = NULL;
3726 }
3727}
3728
3729static void
3730tiqdio_unregister_thinints(void)
3731{
3732 if (tiqdio_ind)
3733 s390_unregister_adapter_interrupt(tiqdio_ind,
3734 TIQDIO_THININT_ISC);
3735}
3736
3737static int
3738qdio_get_qdio_memory(void)
3739{
3740 int i;
3741 indicator_used[0]=1;
3742
3743 for (i=1;i<INDICATORS_PER_CACHELINE;i++)
3744 indicator_used[i]=0;
3745 indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE),
3746 GFP_KERNEL);
3747 if (!indicators)
3748 return -ENOMEM;
3749 return 0;
3750}
3751
3752static void
3753qdio_release_qdio_memory(void)
3754{
3755 kfree(indicators);
3756}
3757
/*
 * Unregister every s390 debug-feature area that was set up. Each call
 * is guarded by a NULL check so this can safely serve as error cleanup
 * when qdio_register_dbf_views() failed part way through.
 */
static void
qdio_unregister_dbf_views(void)
{
	if (qdio_dbf_setup)
		debug_unregister(qdio_dbf_setup);
	if (qdio_dbf_sbal)
		debug_unregister(qdio_dbf_sbal);
	if (qdio_dbf_sense)
		debug_unregister(qdio_dbf_sense);
	if (qdio_dbf_trace)
		debug_unregister(qdio_dbf_trace);
#ifdef CONFIG_QDIO_DEBUG
	if (qdio_dbf_slsb_out)
		debug_unregister(qdio_dbf_slsb_out);
	if (qdio_dbf_slsb_in)
		debug_unregister(qdio_dbf_slsb_in);
#endif /* CONFIG_QDIO_DEBUG */
}
3776
/*
 * Register all s390 debug-feature areas used by qdio (setup, sbal,
 * sense, trace and -- with CONFIG_QDIO_DEBUG -- slsb in/out), attach
 * the hex/ascii view and set the initial level for each.
 *
 * Returns 0 on success; on any failure everything registered so far is
 * torn down again and -ENOMEM is returned.
 */
static int
qdio_register_dbf_views(void)
{
	qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME,
				      QDIO_DBF_SETUP_PAGES,
				      QDIO_DBF_SETUP_NR_AREAS,
				      QDIO_DBF_SETUP_LEN);
	if (!qdio_dbf_setup)
		goto oom;
	debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL);

	qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME,
				     QDIO_DBF_SBAL_PAGES,
				     QDIO_DBF_SBAL_NR_AREAS,
				     QDIO_DBF_SBAL_LEN);
	if (!qdio_dbf_sbal)
		goto oom;

	debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL);

	qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME,
				      QDIO_DBF_SENSE_PAGES,
				      QDIO_DBF_SENSE_NR_AREAS,
				      QDIO_DBF_SENSE_LEN);
	if (!qdio_dbf_sense)
		goto oom;

	debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL);

	qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME,
				      QDIO_DBF_TRACE_PAGES,
				      QDIO_DBF_TRACE_NR_AREAS,
				      QDIO_DBF_TRACE_LEN);
	if (!qdio_dbf_trace)
		goto oom;

	debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);

#ifdef CONFIG_QDIO_DEBUG
	qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
					 QDIO_DBF_SLSB_OUT_PAGES,
					 QDIO_DBF_SLSB_OUT_NR_AREAS,
					 QDIO_DBF_SLSB_OUT_LEN);
	if (!qdio_dbf_slsb_out)
		goto oom;
	debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL);

	qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME,
					QDIO_DBF_SLSB_IN_PAGES,
					QDIO_DBF_SLSB_IN_NR_AREAS,
					QDIO_DBF_SLSB_IN_LEN);
	if (!qdio_dbf_slsb_in)
		goto oom;
	debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
#endif /* CONFIG_QDIO_DEBUG */
	return 0;
oom:
	/* partial registrations are cleaned up here; the NULL checks in
	 * qdio_unregister_dbf_views() make that safe */
	QDIO_PRINT_ERR("not enough memory for dbf.\n");
	qdio_unregister_dbf_views();
	return -ENOMEM;
}
3844
3845static void *qdio_mempool_alloc(gfp_t gfp_mask, void *size)
3846{
3847 return (void *) get_zeroed_page(gfp_mask|GFP_DMA);
3848}
3849
/* mempool element destructor: release the page again */
static void qdio_mempool_free(void *element, void *size)
{
	unsigned long addr = (unsigned long) element;

	free_page(addr);
}
3854
3855static int __init
3856init_QDIO(void)
3857{
3858 int res;
3859 void *ptr;
3860
3861 printk("qdio: loading %s\n",version);
3862
3863 res=qdio_get_qdio_memory();
3864 if (res)
3865 return res;
3866
3867 qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
3868 256, 0, NULL);
3869 if (!qdio_q_cache) {
3870 qdio_release_qdio_memory();
3871 return -ENOMEM;
3872 }
3873
3874 res = qdio_register_dbf_views();
3875 if (res) {
3876 kmem_cache_destroy(qdio_q_cache);
3877 qdio_release_qdio_memory();
3878 return res;
3879 }
3880
3881 QDIO_DBF_TEXT0(0,setup,"initQDIO");
3882 res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
3883
3884 memset((void*)&perf_stats,0,sizeof(perf_stats));
3885 QDIO_DBF_TEXT0(0,setup,"perfstat");
3886 ptr=&perf_stats;
3887 QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
3888
3889 qdio_add_procfs_entry();
3890
3891 qdio_mempool_scssc = mempool_create(QDIO_MEMPOOL_SCSSC_ELEMENTS,
3892 qdio_mempool_alloc,
3893 qdio_mempool_free, NULL);
3894
3895 isc_register(QDIO_AIRQ_ISC);
3896 if (tiqdio_check_chsc_availability())
3897 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
3898
3899 tiqdio_register_thinints();
3900
3901 return 0;
3902 }
3903
/*
 * Module exit: tear down everything set up by init_QDIO(), roughly in
 * reverse order of initialization (thin interrupts first so no new
 * adapter interrupts arrive while the rest is dismantled).
 */
static void __exit
cleanup_QDIO(void)
{
	tiqdio_unregister_thinints();
	isc_unregister(QDIO_AIRQ_ISC);
	qdio_remove_procfs_entry();
	qdio_release_qdio_memory();
	qdio_unregister_dbf_views();
	mempool_destroy(qdio_mempool_scssc);
	kmem_cache_destroy(qdio_q_cache);
	bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
	printk("qdio: %s: module removed\n",version);
}
3917
module_init(init_QDIO);
module_exit(cleanup_QDIO);

/* public entry points of the qdio layer, used by upper-layer drivers */
EXPORT_SYMBOL(qdio_allocate);
EXPORT_SYMBOL(qdio_establish);
EXPORT_SYMBOL(qdio_initialize);
EXPORT_SYMBOL(qdio_activate);
EXPORT_SYMBOL(do_QDIO);
EXPORT_SYMBOL(qdio_shutdown);
EXPORT_SYMBOL(qdio_free);
EXPORT_SYMBOL(qdio_cleanup);
EXPORT_SYMBOL(qdio_synchronize);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 7656081a24d2..c1a70985abfa 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -1,66 +1,20 @@
1/*
2 * linux/drivers/s390/cio/qdio.h
3 *
4 * Copyright 2000,2008 IBM Corp.
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
6 * Jan Glauber <jang@linux.vnet.ibm.com>
7 */
1#ifndef _CIO_QDIO_H 8#ifndef _CIO_QDIO_H
2#define _CIO_QDIO_H 9#define _CIO_QDIO_H
3 10
4#include <asm/page.h> 11#include <asm/page.h>
5#include <asm/isc.h>
6#include <asm/schid.h> 12#include <asm/schid.h>
13#include "chsc.h"
7 14
8#ifdef CONFIG_QDIO_DEBUG 15#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */
9#define QDIO_VERBOSE_LEVEL 9 16#define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */
10#else /* CONFIG_QDIO_DEBUG */ 17#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */
11#define QDIO_VERBOSE_LEVEL 5
12#endif /* CONFIG_QDIO_DEBUG */
13#define QDIO_USE_PROCESSING_STATE
14
15#define QDIO_MINIMAL_BH_RELIEF_TIME 16
16#define QDIO_TIMER_POLL_VALUE 1
17#define IQDIO_TIMER_POLL_VALUE 1
18
19/*
20 * unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so -- as
21 * we never know, whether we'll get initiative again, e.g. to give the
22 * transmit skb's back to the stack, however the stack may be waiting for
23 * them... therefore we define 4 as threshold to start polling (which
24 * will stop as soon as the asynchronous queue catches up)
25 * btw, this only applies to the asynchronous HiperSockets queue
26 */
27#define IQDIO_FILL_LEVEL_TO_POLL 4
28
29#define TIQDIO_THININT_ISC QDIO_AIRQ_ISC
30#define TIQDIO_DELAY_TARGET 0
31#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
32#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */
33#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */
34#define IQDIO_GLOBAL_LAPS_INT 1 /* don't global summary */
35#define IQDIO_LOCAL_LAPS 4
36#define IQDIO_LOCAL_LAPS_INT 1
37#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2
38/*#define IQDIO_IQDC_INT_PARM 0x1234*/
39
40#define QDIO_Q_LAPS 5
41
42#define QDIO_STORAGE_KEY PAGE_DEFAULT_KEY
43
44#define L2_CACHELINE_SIZE 256
45#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32))
46
47#define QDIO_PERF "qdio_perf"
48
49/* must be a power of 2 */
50/*#define QDIO_STATS_NUMBER 4
51
52#define QDIO_STATS_CLASSES 2
53#define QDIO_STATS_COUNT_NEEDED 2*/
54
55#define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before
56 exiting without having use_count
57 of the queue to 0 */
58
59#define QDIO_ESTABLISH_TIMEOUT (1*HZ)
60#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
61#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
62#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ)
63#define QDIO_ACTIVATE_TIMEOUT (5) /* 5 ms */
64 18
65enum qdio_irq_states { 19enum qdio_irq_states {
66 QDIO_IRQ_STATE_INACTIVE, 20 QDIO_IRQ_STATE_INACTIVE,
@@ -72,565 +26,352 @@ enum qdio_irq_states {
72 NR_QDIO_IRQ_STATES, 26 NR_QDIO_IRQ_STATES,
73}; 27};
74 28
75/* used as intparm in do_IO: */ 29/* used as intparm in do_IO */
76#define QDIO_DOING_SENSEID 0 30#define QDIO_DOING_ESTABLISH 1
77#define QDIO_DOING_ESTABLISH 1 31#define QDIO_DOING_ACTIVATE 2
78#define QDIO_DOING_ACTIVATE 2 32#define QDIO_DOING_CLEANUP 3
79#define QDIO_DOING_CLEANUP 3 33
80 34#define SLSB_STATE_NOT_INIT 0x0
81/************************* DEBUG FACILITY STUFF *********************/ 35#define SLSB_STATE_EMPTY 0x1
82 36#define SLSB_STATE_PRIMED 0x2
83#define QDIO_DBF_HEX(ex,name,level,addr,len) \ 37#define SLSB_STATE_HALTED 0xe
84 do { \ 38#define SLSB_STATE_ERROR 0xf
85 if (ex) \ 39#define SLSB_TYPE_INPUT 0x0
86 debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \ 40#define SLSB_TYPE_OUTPUT 0x20
87 else \ 41#define SLSB_OWNER_PROG 0x80
88 debug_event(qdio_dbf_##name,level,(void*)(addr),len); \ 42#define SLSB_OWNER_CU 0x40
89 } while (0) 43
90#define QDIO_DBF_TEXT(ex,name,level,text) \ 44#define SLSB_P_INPUT_NOT_INIT \
91 do { \ 45 (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT) /* 0x80 */
92 if (ex) \ 46#define SLSB_P_INPUT_ACK \
93 debug_text_exception(qdio_dbf_##name,level,text); \ 47 (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x81 */
94 else \ 48#define SLSB_CU_INPUT_EMPTY \
95 debug_text_event(qdio_dbf_##name,level,text); \ 49 (SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x41 */
96 } while (0) 50#define SLSB_P_INPUT_PRIMED \
97 51 (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED) /* 0x82 */
98 52#define SLSB_P_INPUT_HALTED \
99#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len) 53 (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED) /* 0x8e */
100#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len) 54#define SLSB_P_INPUT_ERROR \
101#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len) 55 (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR) /* 0x8f */
102#ifdef CONFIG_QDIO_DEBUG 56#define SLSB_P_OUTPUT_NOT_INIT \
103#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len) 57 (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
104#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len) 58#define SLSB_P_OUTPUT_EMPTY \
105#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len) 59 (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */
106#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len) 60#define SLSB_CU_OUTPUT_PRIMED \
107#else /* CONFIG_QDIO_DEBUG */ 61 (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */
108#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0) 62#define SLSB_P_OUTPUT_HALTED \
109#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0) 63 (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED) /* 0xae */
110#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0) 64#define SLSB_P_OUTPUT_ERROR \
111#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0) 65 (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR) /* 0xaf */
112#endif /* CONFIG_QDIO_DEBUG */ 66
113 67#define SLSB_ERROR_DURING_LOOKUP 0xff
114#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text) 68
115#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text) 69/* additional CIWs returned by extended Sense-ID */
116#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text) 70#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
117#ifdef CONFIG_QDIO_DEBUG 71#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
118#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text)
119#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text)
120#define QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text)
121#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text)
122#else /* CONFIG_QDIO_DEBUG */
123#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0)
124#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0)
125#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0)
126#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0)
127#endif /* CONFIG_QDIO_DEBUG */
128
129#define QDIO_DBF_SETUP_NAME "qdio_setup"
130#define QDIO_DBF_SETUP_LEN 8
131#define QDIO_DBF_SETUP_PAGES 4
132#define QDIO_DBF_SETUP_NR_AREAS 1
133#ifdef CONFIG_QDIO_DEBUG
134#define QDIO_DBF_SETUP_LEVEL 6
135#else /* CONFIG_QDIO_DEBUG */
136#define QDIO_DBF_SETUP_LEVEL 2
137#endif /* CONFIG_QDIO_DEBUG */
138
139#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */
140#define QDIO_DBF_SBAL_LEN 256
141#define QDIO_DBF_SBAL_PAGES 4
142#define QDIO_DBF_SBAL_NR_AREAS 2
143#ifdef CONFIG_QDIO_DEBUG
144#define QDIO_DBF_SBAL_LEVEL 6
145#else /* CONFIG_QDIO_DEBUG */
146#define QDIO_DBF_SBAL_LEVEL 2
147#endif /* CONFIG_QDIO_DEBUG */
148
149#define QDIO_DBF_TRACE_NAME "qdio_trace"
150#define QDIO_DBF_TRACE_LEN 8
151#define QDIO_DBF_TRACE_NR_AREAS 2
152#ifdef CONFIG_QDIO_DEBUG
153#define QDIO_DBF_TRACE_PAGES 16
154#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */
155#else /* CONFIG_QDIO_DEBUG */
156#define QDIO_DBF_TRACE_PAGES 4
157#define QDIO_DBF_TRACE_LEVEL 2
158#endif /* CONFIG_QDIO_DEBUG */
159
160#define QDIO_DBF_SENSE_NAME "qdio_sense"
161#define QDIO_DBF_SENSE_LEN 64
162#define QDIO_DBF_SENSE_PAGES 2
163#define QDIO_DBF_SENSE_NR_AREAS 1
164#ifdef CONFIG_QDIO_DEBUG
165#define QDIO_DBF_SENSE_LEVEL 6
166#else /* CONFIG_QDIO_DEBUG */
167#define QDIO_DBF_SENSE_LEVEL 2
168#endif /* CONFIG_QDIO_DEBUG */
169
170#ifdef CONFIG_QDIO_DEBUG
171#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT
172
173#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out"
174#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q
175#define QDIO_DBF_SLSB_OUT_PAGES 256
176#define QDIO_DBF_SLSB_OUT_NR_AREAS 1
177#define QDIO_DBF_SLSB_OUT_LEVEL 6
178
179#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in"
180#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q
181#define QDIO_DBF_SLSB_IN_PAGES 256
182#define QDIO_DBF_SLSB_IN_NR_AREAS 1
183#define QDIO_DBF_SLSB_IN_LEVEL 6
184#endif /* CONFIG_QDIO_DEBUG */
185
186#define QDIO_PRINTK_HEADER QDIO_NAME ": "
187
188#if QDIO_VERBOSE_LEVEL>8
189#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
190#else
191#define QDIO_PRINT_STUPID(x...) do { } while (0)
192#endif
193 72
194#if QDIO_VERBOSE_LEVEL>7 73/* flags for st qdio sch data */
195#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x) 74#define CHSC_FLAG_QDIO_CAPABILITY 0x80
196#else 75#define CHSC_FLAG_VALIDITY 0x40
197#define QDIO_PRINT_ALL(x...) do { } while (0) 76
198#endif 77/* qdio adapter-characteristics-1 flag */
199 78#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */
200#if QDIO_VERBOSE_LEVEL>6 79#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */
201#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x) 80#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */
202#else 81#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */
203#define QDIO_PRINT_INFO(x...) do { } while (0) 82#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */
204#endif 83#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
205 84#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
206#if QDIO_VERBOSE_LEVEL>5
207#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
208#else
209#define QDIO_PRINT_WARN(x...) do { } while (0)
210#endif
211
212#if QDIO_VERBOSE_LEVEL>4
213#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
214#else
215#define QDIO_PRINT_ERR(x...) do { } while (0)
216#endif
217
218#if QDIO_VERBOSE_LEVEL>3
219#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
220#else
221#define QDIO_PRINT_CRIT(x...) do { } while (0)
222#endif
223
224#if QDIO_VERBOSE_LEVEL>2
225#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
226#else
227#define QDIO_PRINT_ALERT(x...) do { } while (0)
228#endif
229 85
230#if QDIO_VERBOSE_LEVEL>1 86#ifdef CONFIG_64BIT
231#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x) 87static inline int do_sqbs(u64 token, unsigned char state, int queue,
232#else 88 int *start, int *count)
233#define QDIO_PRINT_EMERG(x...) do { } while (0) 89{
234#endif 90 register unsigned long _ccq asm ("0") = *count;
235 91 register unsigned long _token asm ("1") = token;
236#define QDIO_HEXDUMP16(importance,header,ptr) \ 92 unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
237QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \
238 "%02x %02x %02x %02x %02x %02x %02x %02x " \
239 "%02x %02x %02x %02x\n",*(((char*)ptr)), \
240 *(((char*)ptr)+1),*(((char*)ptr)+2), \
241 *(((char*)ptr)+3),*(((char*)ptr)+4), \
242 *(((char*)ptr)+5),*(((char*)ptr)+6), \
243 *(((char*)ptr)+7),*(((char*)ptr)+8), \
244 *(((char*)ptr)+9),*(((char*)ptr)+10), \
245 *(((char*)ptr)+11),*(((char*)ptr)+12), \
246 *(((char*)ptr)+13),*(((char*)ptr)+14), \
247 *(((char*)ptr)+15)); \
248QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
249 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
250 *(((char*)ptr)+16),*(((char*)ptr)+17), \
251 *(((char*)ptr)+18),*(((char*)ptr)+19), \
252 *(((char*)ptr)+20),*(((char*)ptr)+21), \
253 *(((char*)ptr)+22),*(((char*)ptr)+23), \
254 *(((char*)ptr)+24),*(((char*)ptr)+25), \
255 *(((char*)ptr)+26),*(((char*)ptr)+27), \
256 *(((char*)ptr)+28),*(((char*)ptr)+29), \
257 *(((char*)ptr)+30),*(((char*)ptr)+31));
258
259/****************** END OF DEBUG FACILITY STUFF *********************/
260 93
261/* 94 asm volatile(
262 * Some instructions as assembly 95 " .insn rsy,0xeb000000008A,%1,0,0(%2)"
263 */ 96 : "+d" (_ccq), "+d" (_queuestart)
97 : "d" ((unsigned long)state), "d" (_token)
98 : "memory", "cc");
99 *count = _ccq & 0xff;
100 *start = _queuestart & 0xff;
264 101
265static inline int 102 return (_ccq >> 32) & 0xff;
266do_sqbs(unsigned long sch, unsigned char state, int queue,
267 unsigned int *start, unsigned int *count)
268{
269#ifdef CONFIG_64BIT
270 register unsigned long _ccq asm ("0") = *count;
271 register unsigned long _sch asm ("1") = sch;
272 unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
273
274 asm volatile(
275 " .insn rsy,0xeb000000008A,%1,0,0(%2)"
276 : "+d" (_ccq), "+d" (_queuestart)
277 : "d" ((unsigned long)state), "d" (_sch)
278 : "memory", "cc");
279 *count = _ccq & 0xff;
280 *start = _queuestart & 0xff;
281
282 return (_ccq >> 32) & 0xff;
283#else
284 return 0;
285#endif
286} 103}
287 104
288static inline int 105static inline int do_eqbs(u64 token, unsigned char *state, int queue,
289do_eqbs(unsigned long sch, unsigned char *state, int queue, 106 int *start, int *count)
290 unsigned int *start, unsigned int *count)
291{ 107{
292#ifdef CONFIG_64BIT
293 register unsigned long _ccq asm ("0") = *count; 108 register unsigned long _ccq asm ("0") = *count;
294 register unsigned long _sch asm ("1") = sch; 109 register unsigned long _token asm ("1") = token;
295 unsigned long _queuestart = ((unsigned long)queue << 32) | *start; 110 unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
296 unsigned long _state = 0; 111 unsigned long _state = 0;
297 112
298 asm volatile( 113 asm volatile(
299 " .insn rrf,0xB99c0000,%1,%2,0,0" 114 " .insn rrf,0xB99c0000,%1,%2,0,0"
300 : "+d" (_ccq), "+d" (_queuestart), "+d" (_state) 115 : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
301 : "d" (_sch) 116 : "d" (_token)
302 : "memory", "cc" ); 117 : "memory", "cc");
303 *count = _ccq & 0xff; 118 *count = _ccq & 0xff;
304 *start = _queuestart & 0xff; 119 *start = _queuestart & 0xff;
305 *state = _state & 0xff; 120 *state = _state & 0xff;
306 121
307 return (_ccq >> 32) & 0xff; 122 return (_ccq >> 32) & 0xff;
308#else
309 return 0;
310#endif
311}
312
313
314static inline int
315do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2)
316{
317 register unsigned long reg0 asm ("0") = 2;
318 register struct subchannel_id reg1 asm ("1") = schid;
319 register unsigned long reg2 asm ("2") = mask1;
320 register unsigned long reg3 asm ("3") = mask2;
321 int cc;
322
323 asm volatile(
324 " siga 0\n"
325 " ipm %0\n"
326 " srl %0,28\n"
327 : "=d" (cc)
328 : "d" (reg0), "d" (reg1), "d" (reg2), "d" (reg3) : "cc");
329 return cc;
330}
331
332static inline int
333do_siga_input(struct subchannel_id schid, unsigned int mask)
334{
335 register unsigned long reg0 asm ("0") = 1;
336 register struct subchannel_id reg1 asm ("1") = schid;
337 register unsigned long reg2 asm ("2") = mask;
338 int cc;
339
340 asm volatile(
341 " siga 0\n"
342 " ipm %0\n"
343 " srl %0,28\n"
344 : "=d" (cc)
345 : "d" (reg0), "d" (reg1), "d" (reg2) : "cc", "memory");
346 return cc;
347}
348
349static inline int
350do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb,
351 unsigned int fc)
352{
353 register unsigned long __fc asm("0") = fc;
354 register unsigned long __schid asm("1") = schid;
355 register unsigned long __mask asm("2") = mask;
356 int cc;
357
358 asm volatile(
359 " siga 0\n"
360 "0: ipm %0\n"
361 " srl %0,28\n"
362 "1:\n"
363 EX_TABLE(0b,1b)
364 : "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
365 : "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
366 : "cc", "memory");
367 (*bb) = ((unsigned int) __fc) >> 31;
368 return cc;
369}
370
371static inline unsigned long
372do_clear_global_summary(void)
373{
374 register unsigned long __fn asm("1") = 3;
375 register unsigned long __tmp asm("2");
376 register unsigned long __time asm("3");
377
378 asm volatile(
379 " .insn rre,0xb2650000,2,0"
380 : "+d" (__fn), "=d" (__tmp), "=d" (__time));
381 return __time;
382} 123}
383 124#else
384/* 125static inline int do_sqbs(u64 token, unsigned char state, int queue,
385 * QDIO device commands returned by extended Sense-ID 126 int *start, int *count) { return 0; }
386 */ 127static inline int do_eqbs(u64 token, unsigned char *state, int queue,
387#define DEFAULT_ESTABLISH_QS_CMD 0x1b 128 int *start, int *count) { return 0; }
388#define DEFAULT_ESTABLISH_QS_COUNT 0x1000 129#endif /* CONFIG_64BIT */
389#define DEFAULT_ACTIVATE_QS_CMD 0x1f
390#define DEFAULT_ACTIVATE_QS_COUNT 0
391
392/*
393 * additional CIWs returned by extended Sense-ID
394 */
395#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
396#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
397 130
398#define QDIO_CHSC_RESPONSE_CODE_OK 1 131struct qdio_irq;
399/* flags for st qdio sch data */
400#define CHSC_FLAG_QDIO_CAPABILITY 0x80
401#define CHSC_FLAG_VALIDITY 0x40
402 132
403#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40 133struct siga_flag {
404#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20 134 u8 input:1;
405#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10 135 u8 output:1;
406#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08 136 u8 sync:1;
407#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04 137 u8 no_sync_ti:1;
138 u8 no_sync_out_ti:1;
139 u8 no_sync_out_pci:1;
140 u8:2;
141} __attribute__ ((packed));
408 142
409struct qdio_chsc_ssqd { 143struct chsc_ssqd_area {
410 struct chsc_header request; 144 struct chsc_header request;
411 u16 reserved1:10; 145 u16:10;
412 u16 ssid:2; 146 u8 ssid:2;
413 u16 fmt:4; 147 u8 fmt:4;
414 u16 first_sch; 148 u16 first_sch;
415 u16 reserved2; 149 u16:16;
416 u16 last_sch; 150 u16 last_sch;
417 u32 reserved3; 151 u32:32;
418 struct chsc_header response; 152 struct chsc_header response;
419 u32 reserved4; 153 u32:32;
420 u8 flags; 154 struct qdio_ssqd_desc qdio_ssqd;
421 u8 reserved5; 155} __attribute__ ((packed));
422 u16 sch;
423 u8 qfmt;
424 u8 parm;
425 u8 qdioac1;
426 u8 sch_class;
427 u8 pct;
428 u8 icnt;
429 u8 reserved7;
430 u8 ocnt;
431 u8 reserved8;
432 u8 mbccnt;
433 u16 qdioac2;
434 u64 sch_token;
435};
436 156
437struct qdio_perf_stats { 157struct scssc_area {
438#ifdef CONFIG_64BIT 158 struct chsc_header request;
439 atomic64_t tl_runs; 159 u16 operation_code;
440 atomic64_t outbound_tl_runs; 160 u16:16;
441 atomic64_t outbound_tl_runs_resched; 161 u32:32;
442 atomic64_t inbound_tl_runs; 162 u32:32;
443 atomic64_t inbound_tl_runs_resched; 163 u64 summary_indicator_addr;
444 atomic64_t inbound_thin_tl_runs; 164 u64 subchannel_indicator_addr;
445 atomic64_t inbound_thin_tl_runs_resched; 165 u32 ks:4;
446 166 u32 kc:4;
447 atomic64_t siga_outs; 167 u32:21;
448 atomic64_t siga_ins; 168 u32 isc:3;
449 atomic64_t siga_syncs; 169 u32 word_with_d_bit;
450 atomic64_t pcis; 170 u32:32;
451 atomic64_t thinints; 171 struct subchannel_id schid;
452 atomic64_t fast_reqs; 172 u32 reserved[1004];
453 173 struct chsc_header response;
454 atomic64_t outbound_cnt; 174 u32:32;
455 atomic64_t inbound_cnt; 175} __attribute__ ((packed));
456#else /* CONFIG_64BIT */ 176
457 atomic_t tl_runs; 177struct qdio_input_q {
458 atomic_t outbound_tl_runs; 178 /* input buffer acknowledgement flag */
459 atomic_t outbound_tl_runs_resched; 179 int polling;
460 atomic_t inbound_tl_runs; 180
461 atomic_t inbound_tl_runs_resched; 181 /* last time of noticing incoming data */
462 atomic_t inbound_thin_tl_runs; 182 u64 timestamp;
463 atomic_t inbound_thin_tl_runs_resched; 183
464 184 /* lock for clearing the acknowledgement */
465 atomic_t siga_outs; 185 spinlock_t lock;
466 atomic_t siga_ins;
467 atomic_t siga_syncs;
468 atomic_t pcis;
469 atomic_t thinints;
470 atomic_t fast_reqs;
471
472 atomic_t outbound_cnt;
473 atomic_t inbound_cnt;
474#endif /* CONFIG_64BIT */
475}; 186};
476 187
477/* unlikely as the later the better */ 188struct qdio_output_q {
478#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q) 189 /* failed siga-w attempts*/
479#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \ 190 atomic_t busy_siga_counter;
480 qdio_siga_sync(q,~0U,~0U)
481#define SYNC_MEMORY_ALL_OUTB if (unlikely(q->siga_sync)) \
482 qdio_siga_sync(q,~0U,0)
483 191
484#define NOW qdio_get_micros() 192 /* start time of busy condition */
485#define SAVE_TIMESTAMP(q) q->timing.last_transfer_time=NOW 193 u64 timestamp;
486#define GET_SAVED_TIMESTAMP(q) (q->timing.last_transfer_time)
487#define SAVE_FRONTIER(q,val) q->last_move_ftc=val
488#define GET_SAVED_FRONTIER(q) (q->last_move_ftc)
489 194
490#define MY_MODULE_STRING(x) #x 195 /* PCIs are enabled for the queue */
196 int pci_out_enabled;
491 197
492#ifdef CONFIG_64BIT 198 /* timer to check for more outbound work */
493#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x) 199 struct timer_list timer;
494#else /* CONFIG_64BIT */ 200};
495#define QDIO_GET_ADDR(x) ((__u32)(long)x)
496#endif /* CONFIG_64BIT */
497 201
498struct qdio_q { 202struct qdio_q {
499 volatile struct slsb slsb; 203 struct slsb slsb;
204 union {
205 struct qdio_input_q in;
206 struct qdio_output_q out;
207 } u;
500 208
501 char unused[QDIO_MAX_BUFFERS_PER_Q]; 209 /* queue number */
210 int nr;
502 211
503 __u32 * dev_st_chg_ind; 212 /* bitmask of queue number */
213 int mask;
504 214
215 /* input or output queue */
505 int is_input_q; 216 int is_input_q;
506 struct subchannel_id schid;
507 struct ccw_device *cdev;
508
509 unsigned int is_iqdio_q;
510 unsigned int is_thinint_q;
511 217
512 /* bit 0 means queue 0, bit 1 means queue 1, ... */ 218 /* list of thinint input queues */
513 unsigned int mask; 219 struct list_head entry;
514 unsigned int q_no;
515 220
221 /* upper-layer program handler */
516 qdio_handler_t (*handler); 222 qdio_handler_t (*handler);
517 223
518 /* points to the next buffer to be checked for having 224 /*
519 * been processed by the card (outbound) 225 * inbound: next buffer the program should check for
520 * or to the next buffer the program should check for (inbound) */ 226 * outbound: next buffer to check for having been processed
521 volatile int first_to_check; 227 * by the card
522 /* and the last time it was: */ 228 */
523 volatile int last_move_ftc; 229 int first_to_check;
524 230
525 atomic_t number_of_buffers_used; 231 /* first_to_check of the last time */
526 atomic_t polling; 232 int last_move_ftc;
527 233
528 unsigned int siga_in; 234 /* beginning position for calling the program */
529 unsigned int siga_out; 235 int first_to_kick;
530 unsigned int siga_sync;
531 unsigned int siga_sync_done_on_thinints;
532 unsigned int siga_sync_done_on_outb_tis;
533 unsigned int hydra_gives_outbound_pcis;
534 236
535 /* used to save beginning position when calling dd_handlers */ 237 /* number of buffers in use by the adapter */
536 int first_element_to_kick; 238 atomic_t nr_buf_used;
537 239
538 atomic_t use_count; 240 struct qdio_irq *irq_ptr;
539 atomic_t is_in_shutdown;
540
541 void *irq_ptr;
542
543 struct timer_list timer;
544#ifdef QDIO_USE_TIMERS_FOR_POLLING
545 atomic_t timer_already_set;
546 spinlock_t timer_lock;
547#else /* QDIO_USE_TIMERS_FOR_POLLING */
548 struct tasklet_struct tasklet; 241 struct tasklet_struct tasklet;
549#endif /* QDIO_USE_TIMERS_FOR_POLLING */
550 242
551 243 /* error condition during a data transfer */
552 enum qdio_irq_states state;
553
554 /* used to store the error condition during a data transfer */
555 unsigned int qdio_error; 244 unsigned int qdio_error;
556 unsigned int siga_error;
557 unsigned int error_status_flags;
558
559 /* list of interesting queues */
560 volatile struct qdio_q *list_next;
561 volatile struct qdio_q *list_prev;
562 245
563 struct sl *sl; 246 struct sl *sl;
564 volatile struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q]; 247 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
565 248
566 struct qdio_buffer *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q]; 249 /*
567 250 * Warning: Leave this member at the end so it won't be cleared in
568 unsigned long int_parm; 251 * qdio_fill_qs. A page is allocated under this pointer and used for
569 252 * slib and sl. slib is 2048 bytes big and sl points to offset
570 /*struct { 253 * PAGE_SIZE / 2.
571 int in_bh_check_limit; 254 */
572 int threshold; 255 struct slib *slib;
573 } threshold_classes[QDIO_STATS_CLASSES];*/
574
575 struct {
576 /* inbound: the time to stop polling
577 outbound: the time to kick peer */
578 int threshold; /* the real value */
579
580 /* outbound: last time of do_QDIO
581 inbound: last time of noticing incoming data */
582 /*__u64 last_transfer_times[QDIO_STATS_NUMBER];
583 int last_transfer_index; */
584
585 __u64 last_transfer_time;
586 __u64 busy_start;
587 } timing;
588 atomic_t busy_siga_counter;
589 unsigned int queue_type;
590 unsigned int is_pci_out;
591
592 /* leave this member at the end. won't be cleared in qdio_fill_qs */
593 struct slib *slib; /* a page is allocated under this pointer,
594 sl points into this page, offset PAGE_SIZE/2
595 (after slib) */
596} __attribute__ ((aligned(256))); 256} __attribute__ ((aligned(256)));
597 257
598struct qdio_irq { 258struct qdio_irq {
599 __u32 * volatile dev_st_chg_ind; 259 struct qib qib;
260 u32 *dsci; /* address of device state change indicator */
261 struct ccw_device *cdev;
600 262
601 unsigned long int_parm; 263 unsigned long int_parm;
602 struct subchannel_id schid; 264 struct subchannel_id schid;
603 265 unsigned long sch_token; /* QEBSM facility */
604 unsigned int is_iqdio_irq;
605 unsigned int is_thinint_irq;
606 unsigned int hydra_gives_outbound_pcis;
607 unsigned int sync_done_on_outb_pcis;
608
609 /* QEBSM facility */
610 unsigned int is_qebsm;
611 unsigned long sch_token;
612 266
613 enum qdio_irq_states state; 267 enum qdio_irq_states state;
614 268
615 unsigned int no_input_qs; 269 struct siga_flag siga_flag; /* siga sync information from qdioac */
616 unsigned int no_output_qs;
617 270
618 unsigned char qdioac; 271 int nr_input_qs;
272 int nr_output_qs;
619 273
620 struct ccw1 ccw; 274 struct ccw1 ccw;
621
622 struct ciw equeue; 275 struct ciw equeue;
623 struct ciw aqueue; 276 struct ciw aqueue;
624 277
625 struct qib qib; 278 struct qdio_ssqd_desc ssqd_desc;
626 279
627 void (*original_int_handler) (struct ccw_device *, 280 void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
628 unsigned long, struct irb *);
629 281
630 /* leave these four members together at the end. won't be cleared in qdio_fill_irq */ 282 /*
283 * Warning: Leave these members together at the end so they won't be
284 * cleared in qdio_setup_irq.
285 */
631 struct qdr *qdr; 286 struct qdr *qdr;
287 unsigned long chsc_page;
288
632 struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; 289 struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
633 struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; 290 struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
634 struct semaphore setting_up_sema; 291
292 struct mutex setup_mutex;
635}; 293};
636#endif 294
295/* helper functions */
296#define queue_type(q) q->irq_ptr->qib.qfmt
297
298#define is_thinint_irq(irq) \
299 (irq->qib.qfmt == QDIO_IQDIO_QFMT || \
300 css_general_characteristics.aif_osa)
301
302/* the highest iqdio queue is used for multicast */
303static inline int multicast_outbound(struct qdio_q *q)
304{
305 return (q->irq_ptr->nr_output_qs > 1) &&
306 (q->nr == q->irq_ptr->nr_output_qs - 1);
307}
308
309static inline unsigned long long get_usecs(void)
310{
311 return monotonic_clock() >> 12;
312}
313
314#define pci_out_supported(q) \
315 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
316#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
317
318#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti)
319#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti)
320#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
321#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
322#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync)
323#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci)
324
325#define for_each_input_queue(irq_ptr, q, i) \
326 for (i = 0, q = irq_ptr->input_qs[0]; \
327 i < irq_ptr->nr_input_qs; \
328 q = irq_ptr->input_qs[++i])
329#define for_each_output_queue(irq_ptr, q, i) \
330 for (i = 0, q = irq_ptr->output_qs[0]; \
331 i < irq_ptr->nr_output_qs; \
332 q = irq_ptr->output_qs[++i])
333
334#define prev_buf(bufnr) \
335 ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
336#define next_buf(bufnr) \
337 ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
338#define add_buf(bufnr, inc) \
339 ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
340
341/* prototypes for thin interrupt */
342void qdio_sync_after_thinint(struct qdio_q *q);
343int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state);
344void qdio_check_outbound_after_thinint(struct qdio_q *q);
345int qdio_inbound_q_moved(struct qdio_q *q);
346void qdio_kick_inbound_handler(struct qdio_q *q);
347void qdio_stop_polling(struct qdio_q *q);
348int qdio_siga_sync_q(struct qdio_q *q);
349
350void qdio_setup_thinint(struct qdio_irq *irq_ptr);
351int qdio_establish_thinint(struct qdio_irq *irq_ptr);
352void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
353void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
354void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
355void tiqdio_inbound_processing(unsigned long q);
356int tiqdio_allocate_memory(void);
357void tiqdio_free_memory(void);
358int tiqdio_register_thinints(void);
359void tiqdio_unregister_thinints(void);
360
361/* prototypes for setup */
362void qdio_inbound_processing(unsigned long data);
363void qdio_outbound_processing(unsigned long data);
364void qdio_outbound_timer(unsigned long data);
365void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
366 struct irb *irb);
367int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
368 int nr_output_qs);
369void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
370int qdio_setup_irq(struct qdio_initialize *init_data);
371void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
372 struct ccw_device *cdev);
373void qdio_release_memory(struct qdio_irq *irq_ptr);
374int qdio_setup_init(void);
375void qdio_setup_exit(void);
376
377#endif /* _CIO_QDIO_H */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
new file mode 100644
index 000000000000..337aa3087a78
--- /dev/null
+++ b/drivers/s390/cio/qdio_debug.c
@@ -0,0 +1,240 @@
1/*
2 * drivers/s390/cio/qdio_debug.c
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#include <linux/proc_fs.h>
9#include <linux/seq_file.h>
10#include <linux/debugfs.h>
11#include <asm/qdio.h>
12#include <asm/debug.h>
13#include "qdio_debug.h"
14#include "qdio.h"
15
16debug_info_t *qdio_dbf_setup;
17debug_info_t *qdio_dbf_trace;
18
19static struct dentry *debugfs_root;
20#define MAX_DEBUGFS_QUEUES 32
21static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
22static DEFINE_MUTEX(debugfs_mutex);
23
24void qdio_allocate_do_dbf(struct qdio_initialize *init_data)
25{
26 char dbf_text[20];
27
28 sprintf(dbf_text, "qfmt:%x", init_data->q_format);
29 QDIO_DBF_TEXT0(0, setup, dbf_text);
30 QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8);
31 sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format);
32 QDIO_DBF_TEXT0(0, setup, dbf_text);
33 QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *));
34 QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *));
35 QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *));
36 sprintf(dbf_text, "niq:%4x", init_data->no_input_qs);
37 QDIO_DBF_TEXT0(0, setup, dbf_text);
38 sprintf(dbf_text, "noq:%4x", init_data->no_output_qs);
39 QDIO_DBF_TEXT0(0, setup, dbf_text);
40 QDIO_DBF_HEX0(0, setup, &init_data->input_handler, sizeof(void *));
41 QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *));
42 QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long));
43 QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long));
44 QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *));
45 QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *));
46}
47
48static void qdio_unregister_dbf_views(void)
49{
50 if (qdio_dbf_setup)
51 debug_unregister(qdio_dbf_setup);
52 if (qdio_dbf_trace)
53 debug_unregister(qdio_dbf_trace);
54}
55
56static int qdio_register_dbf_views(void)
57{
58 qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES,
59 QDIO_DBF_SETUP_NR_AREAS,
60 QDIO_DBF_SETUP_LEN);
61 if (!qdio_dbf_setup)
62 goto oom;
63 debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
64 debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL);
65
66 qdio_dbf_trace = debug_register("qdio_trace", QDIO_DBF_TRACE_PAGES,
67 QDIO_DBF_TRACE_NR_AREAS,
68 QDIO_DBF_TRACE_LEN);
69 if (!qdio_dbf_trace)
70 goto oom;
71 debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view);
72 debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL);
73 return 0;
74oom:
75 qdio_unregister_dbf_views();
76 return -ENOMEM;
77}
78
79static int qstat_show(struct seq_file *m, void *v)
80{
81 unsigned char state;
82 struct qdio_q *q = m->private;
83 int i;
84
85 if (!q)
86 return 0;
87
88 seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci);
89 seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
90 seq_printf(m, "ftc: %d\n", q->first_to_check);
91 seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc);
92 seq_printf(m, "polling: %d\n", q->u.in.polling);
93 seq_printf(m, "slsb buffer states:\n");
94
95 qdio_siga_sync_q(q);
96 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
97 get_buf_state(q, i, &state);
98 switch (state) {
99 case SLSB_P_INPUT_NOT_INIT:
100 case SLSB_P_OUTPUT_NOT_INIT:
101 seq_printf(m, "N");
102 break;
103 case SLSB_P_INPUT_PRIMED:
104 case SLSB_CU_OUTPUT_PRIMED:
105 seq_printf(m, "+");
106 break;
107 case SLSB_P_INPUT_ACK:
108 seq_printf(m, "A");
109 break;
110 case SLSB_P_INPUT_ERROR:
111 case SLSB_P_OUTPUT_ERROR:
112 seq_printf(m, "x");
113 break;
114 case SLSB_CU_INPUT_EMPTY:
115 case SLSB_P_OUTPUT_EMPTY:
116 seq_printf(m, "-");
117 break;
118 case SLSB_P_INPUT_HALTED:
119 case SLSB_P_OUTPUT_HALTED:
120 seq_printf(m, ".");
121 break;
122 default:
123 seq_printf(m, "?");
124 }
125 if (i == 63)
126 seq_printf(m, "\n");
127 }
128 seq_printf(m, "\n");
129 return 0;
130}
131
132static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
133 size_t count, loff_t *off)
134{
135 struct seq_file *seq = file->private_data;
136 struct qdio_q *q = seq->private;
137
138 if (!q)
139 return 0;
140
141 if (q->is_input_q)
142 xchg(q->irq_ptr->dsci, 1);
143 local_bh_disable();
144 tasklet_schedule(&q->tasklet);
145 local_bh_enable();
146 return count;
147}
148
149static int qstat_seq_open(struct inode *inode, struct file *filp)
150{
151 return single_open(filp, qstat_show,
152 filp->f_path.dentry->d_inode->i_private);
153}
154
155static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name)
156{
157 memset(name, 0, sizeof(name));
158 sprintf(name, "%s", cdev->dev.bus_id);
159 if (q->is_input_q)
160 sprintf(name + strlen(name), "_input");
161 else
162 sprintf(name + strlen(name), "_output");
163 sprintf(name + strlen(name), "_%d", q->nr);
164}
165
166static void remove_debugfs_entry(struct qdio_q *q)
167{
168 int i;
169
170 for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) {
171 if (!debugfs_queues[i])
172 continue;
173 if (debugfs_queues[i]->d_inode->i_private == q) {
174 debugfs_remove(debugfs_queues[i]);
175 debugfs_queues[i] = NULL;
176 }
177 }
178}
179
180static struct file_operations debugfs_fops = {
181 .owner = THIS_MODULE,
182 .open = qstat_seq_open,
183 .read = seq_read,
184 .write = qstat_seq_write,
185 .llseek = seq_lseek,
186 .release = single_release,
187};
188
189static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
190{
191 int i = 0;
192 char name[40];
193
194 while (debugfs_queues[i] != NULL) {
195 i++;
196 if (i >= MAX_DEBUGFS_QUEUES)
197 return;
198 }
199 get_queue_name(q, cdev, name);
200 debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
201 debugfs_root, q, &debugfs_fops);
202}
203
204void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
205{
206 struct qdio_q *q;
207 int i;
208
209 mutex_lock(&debugfs_mutex);
210 for_each_input_queue(irq_ptr, q, i)
211 setup_debugfs_entry(q, cdev);
212 for_each_output_queue(irq_ptr, q, i)
213 setup_debugfs_entry(q, cdev);
214 mutex_unlock(&debugfs_mutex);
215}
216
217void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
218{
219 struct qdio_q *q;
220 int i;
221
222 mutex_lock(&debugfs_mutex);
223 for_each_input_queue(irq_ptr, q, i)
224 remove_debugfs_entry(q);
225 for_each_output_queue(irq_ptr, q, i)
226 remove_debugfs_entry(q);
227 mutex_unlock(&debugfs_mutex);
228}
229
230int __init qdio_debug_init(void)
231{
232 debugfs_root = debugfs_create_dir("qdio_queues", NULL);
233 return qdio_register_dbf_views();
234}
235
236void qdio_debug_exit(void)
237{
238 debugfs_remove(debugfs_root);
239 qdio_unregister_dbf_views();
240}
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
new file mode 100644
index 000000000000..8484b83698e1
--- /dev/null
+++ b/drivers/s390/cio/qdio_debug.h
@@ -0,0 +1,91 @@
1/*
2 * drivers/s390/cio/qdio_debug.h
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#ifndef QDIO_DEBUG_H
9#define QDIO_DEBUG_H
10
11#include <asm/debug.h>
12#include <asm/qdio.h>
13#include "qdio.h"
14
15#define QDIO_DBF_HEX(ex, name, level, addr, len) \
16 do { \
17 if (ex) \
18 debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \
19 else \
20 debug_event(qdio_dbf_##name, level, (void *)(addr), len); \
21 } while (0)
22#define QDIO_DBF_TEXT(ex, name, level, text) \
23 do { \
24 if (ex) \
25 debug_text_exception(qdio_dbf_##name, level, text); \
26 else \
27 debug_text_event(qdio_dbf_##name, level, text); \
28 } while (0)
29
30#define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len)
31#define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len)
32#define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len)
33
34#ifdef CONFIG_QDIO_DEBUG
35#define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len)
36#define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len)
37#define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len)
38#define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len)
39#else
40#define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0)
41#define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0)
42#define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0)
43#define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0)
44#endif /* CONFIG_QDIO_DEBUG */
45
46#define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text)
47#define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text)
48#define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text)
49
50#ifdef CONFIG_QDIO_DEBUG
51#define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text)
52#define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text)
53#define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text)
54#define QDIO_DBF_TEXT6(ex, name, text) QDIO_DBF_TEXT(ex, name, 6, text)
55#else
56#define QDIO_DBF_TEXT3(ex, name, text) do {} while (0)
57#define QDIO_DBF_TEXT4(ex, name, text) do {} while (0)
58#define QDIO_DBF_TEXT5(ex, name, text) do {} while (0)
59#define QDIO_DBF_TEXT6(ex, name, text) do {} while (0)
60#endif /* CONFIG_QDIO_DEBUG */
61
62/* s390dbf views */
63#define QDIO_DBF_SETUP_LEN 8
64#define QDIO_DBF_SETUP_PAGES 4
65#define QDIO_DBF_SETUP_NR_AREAS 1
66
67#define QDIO_DBF_TRACE_LEN 8
68#define QDIO_DBF_TRACE_NR_AREAS 2
69
70#ifdef CONFIG_QDIO_DEBUG
71#define QDIO_DBF_TRACE_PAGES 16
72#define QDIO_DBF_SETUP_LEVEL 6
73#define QDIO_DBF_TRACE_LEVEL 4
74#else /* !CONFIG_QDIO_DEBUG */
75#define QDIO_DBF_TRACE_PAGES 4
76#define QDIO_DBF_SETUP_LEVEL 2
77#define QDIO_DBF_TRACE_LEVEL 2
78#endif /* CONFIG_QDIO_DEBUG */
79
80extern debug_info_t *qdio_dbf_setup;
81extern debug_info_t *qdio_dbf_trace;
82
83void qdio_allocate_do_dbf(struct qdio_initialize *init_data);
84void debug_print_bstat(struct qdio_q *q);
85void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
86 struct ccw_device *cdev);
87void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr,
88 struct ccw_device *cdev);
89int qdio_debug_init(void);
90void qdio_debug_exit(void);
91#endif
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
new file mode 100644
index 000000000000..d10c73cc1688
--- /dev/null
+++ b/drivers/s390/cio/qdio_main.c
@@ -0,0 +1,1755 @@
1/*
2 * linux/drivers/s390/cio/qdio_main.c
3 *
4 * Linux for s390 qdio support, buffer handling, qdio API and module support.
5 *
6 * Copyright 2000,2008 IBM Corp.
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Jan Glauber <jang@linux.vnet.ibm.com>
9 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
10 */
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/timer.h>
15#include <linux/delay.h>
16#include <asm/atomic.h>
17#include <asm/debug.h>
18#include <asm/qdio.h>
19
20#include "cio.h"
21#include "css.h"
22#include "device.h"
23#include "qdio.h"
24#include "qdio_debug.h"
25#include "qdio_perf.h"
26
27MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
28 "Jan Glauber <jang@linux.vnet.ibm.com>");
29MODULE_DESCRIPTION("QDIO base support");
30MODULE_LICENSE("GPL");
31
/*
 * do_siga_sync - issue SIGA-s (synchronize) for a subchannel.
 * Function code 2 selects the sync function; out_mask/in_mask select
 * which output and input queues to synchronize.
 * Returns the instruction's condition code (0..3).
 */
static inline int do_siga_sync(struct subchannel_id schid,
			       unsigned int out_mask, unsigned int in_mask)
{
	/* SIGA takes its operands in fixed registers 0-3 */
	register unsigned long __fc asm ("0") = 2;	/* function code: sync */
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"		/* insert program mask */
		"	srl	%0,28\n"	/* shift cc into the low bits */
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
49
/*
 * do_siga_input - issue SIGA-r (initiate input) for the queues in @mask.
 * Function code 1 selects the read function.
 * Returns the instruction's condition code (0..3).
 */
static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
{
	register unsigned long __fc asm ("0") = 1;	/* function code: input */
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		/* "memory" clobber: inbound buffer contents may change */
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}
65
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 u32 *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	/* preset; only overwritten by ipm when siga does not fault */
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		/* a faulting siga resumes at 1:, leaving the preset cc intact */
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	/* busy bit is returned in the top bit of register 0 (__fc) */
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
95
96static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
97{
98 char dbf_text[15];
99
100 /* all done or next buffer state different */
101 if (ccq == 0 || ccq == 32)
102 return 0;
103 /* not all buffers processed */
104 if (ccq == 96 || ccq == 97)
105 return 1;
106 /* notify devices immediately */
107 sprintf(dbf_text, "%d", ccq);
108 QDIO_DBF_TEXT2(1, trace, dbf_text);
109 return -EIO;
110}
111
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;
	char dbf_text[15];

	BUG_ON(!q->irq_ptr->sch_token);

	/* for EQBS, output queues are numbered after the input queues */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	/* do_eqbs updates tmp_start/tmp_count to reflect its progress */
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count))
		return (count - tmp_count);
	if (rc == 1) {
		QDIO_DBF_TEXT5(1, trace, "eqAGAIN");
		goto again;
	}

	if (rc < 0) {
		QDIO_DBF_TEXT2(1, trace, "eqberr");
		sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr);
		QDIO_DBF_TEXT2(1, trace, dbf_text);
		/* unrecoverable: report a check condition to the driver */
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
160
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;
	char dbf_text[15];

	BUG_ON(!q->irq_ptr->sch_token);

	/* for SQBS, output queues are numbered after the input queues */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	/* do_sqbs updates tmp_start/tmp_count to reflect its progress */
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		QDIO_DBF_TEXT5(1, trace, "sqAGAIN");
		goto again;
	}
	if (rc < 0) {
		QDIO_DBF_TEXT3(1, trace, "sqberr");
		sprintf(dbf_text, "%2x,%2x", count, tmp_count);
		QDIO_DBF_TEXT3(1, trace, dbf_text);
		sprintf(dbf_text, "%d,%d", ccq, nr);
		QDIO_DBF_TEXT3(1, trace, dbf_text);

		/* unrecoverable: report a check condition to the driver */
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	/* on success every requested buffer must have been changed */
	WARN_ON(tmp_count);
	return count - tmp_count;
}
207
208/* returns number of examined buffers and their common state in *state */
209static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
210 unsigned char *state, unsigned int count)
211{
212 unsigned char __state = 0;
213 int i;
214
215 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
216 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
217
218 if (is_qebsm(q))
219 return qdio_do_eqbs(q, state, bufnr, count);
220
221 for (i = 0; i < count; i++) {
222 if (!__state)
223 __state = q->slsb.val[bufnr];
224 else if (q->slsb.val[bufnr] != __state)
225 break;
226 bufnr = next_buf(bufnr);
227 }
228 *state = __state;
229 return i;
230}
231
/* examine a single buffer's state; convenience wrapper for get_buf_states */
inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
			 unsigned char *state)
{
	return get_buf_states(q, bufnr, state, 1);
}
237
238/* wrap-around safe setting of slsb states, returns number of changed buffers */
239static inline int set_buf_states(struct qdio_q *q, int bufnr,
240 unsigned char state, int count)
241{
242 int i;
243
244 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
245 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
246
247 if (is_qebsm(q))
248 return qdio_do_sqbs(q, state, bufnr, count);
249
250 for (i = 0; i < count; i++) {
251 xchg(&q->slsb.val[bufnr], state);
252 bufnr = next_buf(bufnr);
253 }
254 return count;
255}
256
/* set a single buffer's state; convenience wrapper for set_buf_states */
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
262
/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/* mark every buffer of every queue as program-owned / not initialized */
	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
276
/*
 * Issue SIGA-s for the queues selected by the output/input masks.
 * No-op unless the device requires explicit synchronization.
 * Returns the SIGA condition code (0 on success).
 */
static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
			  unsigned int input)
{
	int cc;

	if (!need_siga_sync(q))
		return 0;

	qdio_perf_stat_inc(&perf_stats.siga_sync);

	cc = do_siga_sync(q->irq_ptr->schid, output, input);
	if (cc) {
		QDIO_DBF_TEXT4(0, trace, "sigasync");
		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
		QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
	}
	return cc;
}
295
296inline int qdio_siga_sync_q(struct qdio_q *q)
297{
298 if (q->is_input_q)
299 return qdio_siga_sync(q, 0, q->mask);
300 else
301 return qdio_siga_sync(q, q->mask, 0);
302}
303
/* sync all output queues of the subchannel */
static inline int qdio_siga_sync_out(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, 0);
}
308
/* sync all input and output queues of the subchannel */
static inline int qdio_siga_sync_all(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, ~0U);
}
313
/* build the SIGA-w operands for this queue and issue the instruction */
static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned int fc = 0;
	unsigned long schid;

	if (!is_qebsm(q))
		/* non-QEBSM: pass the subchannel id (first word of schid) */
		schid = *((u32 *)&q->irq_ptr->schid);
	else {
		/* QEBSM: pass the subchannel token; 0x80 selects token format */
		schid = q->irq_ptr->sch_token;
		fc |= 0x80;
	}
	return do_siga_output(schid, q->mask, busy_bit, fc);
}
327
/* issue SIGA-w, briefly retrying when the adapter reports the busy bit */
static int qdio_siga_output(struct qdio_q *q)
{
	int cc;
	u32 busy_bit;
	u64 start_time = 0;

	QDIO_DBF_TEXT5(0, trace, "sigaout");
	QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));

	qdio_perf_stat_inc(&perf_stats.siga_out);
again:
	cc = qdio_do_siga_output(q, &busy_bit);
	/*
	 * For HiperSockets (IQD) cc=2 with busy bit may be transient:
	 * keep retrying for up to QDIO_BUSY_BIT_PATIENCE microseconds.
	 */
	if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) {
		if (!start_time)
			start_time = get_usecs();
		else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}

	/* encode the persistent busy condition in the return code */
	if (cc == 2 && busy_bit)
		cc |= QDIO_ERROR_SIGA_BUSY;
	if (cc)
		QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
	return cc;
}
353
/* issue SIGA-r for this input queue; returns the condition code */
static inline int qdio_siga_input(struct qdio_q *q)
{
	int cc;

	QDIO_DBF_TEXT4(0, trace, "sigain");
	QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));

	qdio_perf_stat_inc(&perf_stats.siga_in);

	cc = do_siga_input(q->irq_ptr->schid, q->mask);
	if (cc)
		QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
	return cc;
}
368
/* called from thinint inbound handler */
void qdio_sync_after_thinint(struct qdio_q *q)
{
	/* without outbound PCI support only this queue needs syncing */
	if (!pci_out_supported(q)) {
		qdio_siga_sync_q(q);
		return;
	}

	if (need_siga_sync_thinint(q))
		qdio_siga_sync_all(q);
	else if (need_siga_sync_out_thinint(q))
		qdio_siga_sync_out(q);
}
380
/* stop ACK-based polling on an input queue and return the ACKed buffer */
inline void qdio_stop_polling(struct qdio_q *q)
{
	/* the lock protects the polling flag and the ACKed buffer state */
	spin_lock_bh(&q->u.in.lock);
	if (!q->u.in.polling) {
		spin_unlock_bh(&q->u.in.lock);
		return;
	}
	q->u.in.polling = 0;
	qdio_perf_stat_inc(&perf_stats.debug_stop_polling);

	/* show the card that we are not polling anymore */
	set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
	spin_unlock_bh(&q->u.in.lock);
}
395
/* trace an SLSB error state and record it in q->qdio_error for the handler */
static void announce_buffer_error(struct qdio_q *q)
{
	char dbf_text[15];

	if (q->is_input_q)
		QDIO_DBF_TEXT3(1, trace, "inperr");
	else
		QDIO_DBF_TEXT3(0, trace, "outperr");

	/* first_to_check plus the SBAL flag bytes identify the bad buffer */
	sprintf(dbf_text, "%x-%x-%x", q->first_to_check,
		q->sbal[q->first_to_check]->element[14].flags,
		q->sbal[q->first_to_check]->element[15].flags);
	QDIO_DBF_TEXT3(1, trace, dbf_text);
	QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256);

	q->qdio_error = QDIO_ERROR_SLSB_STATE;
}
413
/*
 * Scan the input queue for buffers whose state changed and advance
 * first_to_check past them.  Returns the new first_to_check value.
 */
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	/*
	 * If we still poll don't update last_move_ftc, keep the
	 * previously ACK buffer there.
	 */
	if (!q->u.in.polling)
		q->last_move_ftc = q->first_to_check;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * will sync the queues.
	 */

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		QDIO_DBF_TEXT5(0, trace, "inptprim");

		/*
		 * Only ACK the first buffer. The ACK will be removed in
		 * qdio_stop_polling.
		 */
		if (q->u.in.polling)
			state = SLSB_P_INPUT_NOT_INIT;
		else {
			q->u.in.polling = 1;
			state = SLSB_P_INPUT_ACK;
		}
		set_buf_state(q, q->first_to_check, state);

		/*
		 * Need to change all PRIMED buffers to NOT_INIT, otherwise
		 * we're losing initiative in the thinint code.
		 */
		if (count > 1)
			set_buf_states(q, next_buf(q->first_to_check),
				       SLSB_P_INPUT_NOT_INIT, count - 1);

		/*
		 * No siga-sync needed for non-qebsm here, as the inbound queue
		 * will be synced on the next siga-r, resp.
		 * tiqdio_is_inbound_q_done will do the siga-sync.
		 */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		goto check_next;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		/* nothing new from the adapter yet */
		QDIO_DBF_TEXT5(0, trace, "inpnipro");
		break;
	default:
		BUG();
	}
out:
	QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int));
	return q->first_to_check;
}
500
501int qdio_inbound_q_moved(struct qdio_q *q)
502{
503 int bufnr;
504
505 bufnr = get_inbound_buffer_frontier(q);
506
507 if ((bufnr != q->last_move_ftc) || q->qdio_error) {
508 if (!need_siga_sync(q) && !pci_out_supported(q))
509 q->u.in.timestamp = get_usecs();
510
511 QDIO_DBF_TEXT4(0, trace, "inhasmvd");
512 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
513 return 1;
514 } else
515 return 0;
516}
517
/*
 * Decide whether inbound processing is finished: returns 1 when no new
 * work is expected, 0 when the tasklet should keep polling.
 */
static int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	/*
	 * We need that one for synchronization with the adapter, as it
	 * does a kind of PCI avoidance.
	 */
	qdio_siga_sync_q(q);

	get_buf_state(q, q->first_to_check, &state);
	if (state == SLSB_P_INPUT_PRIMED)
		/* we got something to do */
		return 0;

	/* on VM, we don't poll, so the q is always done here */
	if (need_siga_sync(q) || pci_out_supported(q))
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		/* poll budget exhausted: stop polling */
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT4(0, trace, "inqisdon");
		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
		sprintf(dbf_text, "pf%02x", q->first_to_check);
		QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		return 1;
	} else {
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT4(0, trace, "inqisntd");
		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
		sprintf(dbf_text, "pf%02x", q->first_to_check);
		QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		return 0;
	}
}
565
/* call the driver's inbound handler for buffers moved since the last kick */
void qdio_kick_inbound_handler(struct qdio_q *q)
{
	int count, start, end;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	qdio_perf_stat_inc(&perf_stats.inbound_handler);

	/* wrap-around safe distance from first_to_kick to first_to_check */
	start = q->first_to_kick;
	end = q->first_to_check;
	if (end >= start)
		count = end - start;
	else
		count = end + QDIO_MAX_BUFFERS_PER_Q - start;

#ifdef CONFIG_QDIO_DEBUG
	sprintf(dbf_text, "s=%2xc=%2x", start, count);
	QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
		   start, count, q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = q->first_to_check;
	q->qdio_error = 0;
}
597
/* core of the inbound tasklet: deliver buffers and manage polling state */
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
again:
	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_inbound_handler(q);

	if (!qdio_inbound_q_done(q))
		/* means poll time is not yet over */
		goto again;

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q))
		goto again;
}
619
/* inbound tasklet entry point; data carries the queue pointer */
void qdio_inbound_processing(unsigned long data)
{
	__qdio_inbound_processing((struct qdio_q *)data);
}
626
/*
 * Scan the output queue for buffers the adapter is done with and
 * advance first_to_check past them.  Returns the new first_to_check.
 */
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	/* sync queue types whose state changes are not pushed to us */
	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
		qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		QDIO_DBF_TEXT5(0, trace, "outpempt");

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		/*
		 * We fetch all buffer states at once. get_buf_states may
		 * return count < stop. For QEBSM we do not loop.
		 */
		if (is_qebsm(q))
			break;
		goto check_next;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		QDIO_DBF_TEXT5(0, trace, "outpprim");
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}
687
688/* all buffers processed? */
689static inline int qdio_outbound_q_done(struct qdio_q *q)
690{
691 return atomic_read(&q->nr_buf_used) == 0;
692}
693
694static inline int qdio_outbound_q_moved(struct qdio_q *q)
695{
696 int bufnr;
697
698 bufnr = get_outbound_buffer_frontier(q);
699
700 if ((bufnr != q->last_move_ftc) || q->qdio_error) {
701 q->last_move_ftc = bufnr;
702 QDIO_DBF_TEXT4(0, trace, "oqhasmvd");
703 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
704 return 1;
705 } else
706 return 0;
707}
708
/*
 * VM could present us cc=2 and busy bit set on SIGA-write
 * during reconfiguration of their Guest LAN (only in iqdio mode,
 * otherwise qdio is asynchronous and cc=2 and busy bit there will take
 * the queues down immediately).
 *
 * Therefore qdio_siga_output will try for a short time constantly,
 * if such a condition occurs. If it doesn't change, it will
 * increase the busy_siga_counter and save the timestamp, and
 * schedule the queue for later processing. qdio_outbound_processing
 * will check out the counter. If non-zero, it will call qdio_kick_outbound_q
 * as often as the value of the counter. This will attempt further SIGA
 * instructions. For each successful SIGA, the counter is
 * decreased, for failing SIGAs the counter remains the same, after
 * all. After some time of no movement, qdio_kick_outbound_q will
 * finally fail and reflect corresponding error codes to call
 * the upper layer module and have it take the queues down.
 *
 * Note that this is a change from the original HiperSockets design
 * (saying cc=2 and busy bit means take the queues down), but in
 * these days Guest LAN didn't exist... excessive cc=2 with busy bit
 * conditions will still take the queues down, but the threshold is
 * higher due to the Guest LAN environment.
 *
 * Called from outbound tasklet and do_QDIO handler.
 */
static void qdio_kick_outbound_q(struct qdio_q *q)
{
	int rc;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];

	QDIO_DBF_TEXT5(0, trace, "kickoutq");
	QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
#endif /* CONFIG_QDIO_DEBUG */

	if (!need_siga_out(q))
		return;

	rc = qdio_siga_output(q);
	switch (rc) {
	case 0:
		/* went smooth this time, reset timestamp */
		q->u.out.timestamp = 0;

		/* TODO: improve error handling for CC=0 case */
#ifdef CONFIG_QDIO_DEBUG
		/*
		 * NOTE(review): "cc2reslv" looks copy-pasted from the busy
		 * path below; on the success path it is misleading — confirm.
		 */
		QDIO_DBF_TEXT3(0, trace, "cc2reslv");
		sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
			atomic_read(&q->u.out.busy_siga_counter));
		QDIO_DBF_TEXT3(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		break;
	/* cc=2 and busy bit */
	case (2 | QDIO_ERROR_SIGA_BUSY):
		atomic_inc(&q->u.out.busy_siga_counter);

		/* if the last siga was successful, save timestamp here */
		if (!q->u.out.timestamp)
			q->u.out.timestamp = get_usecs();

		/* if we're in time, don't touch qdio_error */
		if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) {
			tasklet_schedule(&q->tasklet);
			break;
		}
		QDIO_DBF_TEXT2(0, trace, "cc2REPRT");
#ifdef CONFIG_QDIO_DEBUG
		sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
			atomic_read(&q->u.out.busy_siga_counter));
		QDIO_DBF_TEXT3(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		/* fallthrough: patience exhausted, report the error */
	default:
		/* for plain cc=1, 2 or 3 */
		q->qdio_error = rc;
	}
}
786
/* call the driver's outbound handler for buffers completed since last kick */
static void qdio_kick_outbound_handler(struct qdio_q *q)
{
	int start, end, count;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	/* wrap-around safe distance from first_to_kick to last_move_ftc */
	start = q->first_to_kick;
	end = q->last_move_ftc;
	if (end >= start)
		count = end - start;
	else
		count = end + QDIO_MAX_BUFFERS_PER_Q - start;

#ifdef CONFIG_QDIO_DEBUG
	QDIO_DBF_TEXT4(0, trace, "kickouth");
	QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));

	sprintf(dbf_text, "s=%2xc=%2x", start, count);
	QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time: */
	q->first_to_kick = q->last_move_ftc;
	q->qdio_error = 0;
}
819
/* core of the outbound tasklet: retry busy SIGAs and deliver completions */
static void __qdio_outbound_processing(struct qdio_q *q)
{
	int siga_attempts;

	qdio_perf_stat_inc(&perf_stats.tasklet_outbound);

	/* see comment in qdio_kick_outbound_q */
	siga_attempts = atomic_read(&q->u.out.busy_siga_counter);
	while (siga_attempts--) {
		atomic_dec(&q->u.out.busy_siga_counter);
		qdio_kick_outbound_q(q);
	}

	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_outbound_handler(q);

	/* zfcp without outbound PCI: re-poll until the queue drains */
	if (queue_type(q) == QDIO_ZFCP_QFMT) {
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			tasklet_schedule(&q->tasklet);
		return;
	}

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else {
		if (!timer_pending(&q->u.out.timer)) {
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
			qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
		}
	}
}
865
/* outbound tasklet entry point; data carries the queue pointer */
void qdio_outbound_processing(unsigned long data)
{
	__qdio_outbound_processing((struct qdio_q *)data);
}
872
873void qdio_outbound_timer(unsigned long data)
874{
875 struct qdio_q *q = (struct qdio_q *)data;
876 tasklet_schedule(&q->tasklet);
877}
878
879/* called from thinint inbound tasklet */
880void qdio_check_outbound_after_thinint(struct qdio_q *q)
881{
882 struct qdio_q *out;
883 int i;
884
885 if (!pci_out_supported(q))
886 return;
887
888 for_each_output_queue(q->irq_ptr, out, i)
889 if (!qdio_outbound_q_done(out))
890 tasklet_schedule(&out->tasklet);
891}
892
/* update the irq state and make the change visible to other cpus */
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];

	QDIO_DBF_TEXT5(0, trace, "newstate");
	sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state);
	QDIO_DBF_TEXT5(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	irq_ptr->state = state;
	/* full barrier: order the store before subsequent accesses */
	mb();
}
907
/* dump irb and extended control word to the trace if sense data is present */
static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
{
	char dbf_text[15];

	/* erw.cons != 0 indicates the irb carries concurrent sense data */
	if (irb->esw.esw0.erw.cons) {
		sprintf(dbf_text, "sens%4x", schid.sch_no);
		QDIO_DBF_TEXT2(1, trace, dbf_text);
		QDIO_DBF_HEX0(0, trace, irb, 64);
		QDIO_DBF_HEX0(0, trace, irb->ecw, 64);
	}
}
919
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	qdio_perf_stat_inc(&perf_stats.pci_int);

	/* inbound work is always possible after a PCI */
	for_each_input_queue(irq_ptr, q, i)
		tasklet_schedule(&q->tasklet);

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;

	/* check output queues the adapter may have emptied */
	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;

		if (!siga_syncs_out_pci(q))
			qdio_siga_sync_q(q);

		tasklet_schedule(&q->tasklet);
	}
}
944
/* report an activate-time check condition to the driver and stop the irq */
static void qdio_handle_activate_check(struct ccw_device *cdev,
				       unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	char dbf_text[15];

	QDIO_DBF_TEXT2(1, trace, "ick2");
	sprintf(dbf_text, "%s", cdev->dev.bus_id);
	QDIO_DBF_TEXT2(1, trace, dbf_text);
	QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int));
	QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
	QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));

	/* use any existing queue to reach the driver's handler */
	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}
972
/* work queue callback: perform the shutdown outside interrupt context */
static void qdio_call_shutdown(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	/* drop the reference taken before queueing this work */
	put_device(&cdev->dev);
}
983
/* handle a fatal interrupt error depending on the current irq state */
static void qdio_int_error(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
		if (get_device(&cdev->dev)) {
			/* Can't call shutdown from interrupt context. */
			PREPARE_WORK(&cdev->private->kick_work,
				     qdio_call_shutdown);
			queue_work(ccw_device_work, &cdev->private->kick_work);
		}
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
1008
1009static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
1010 int dstat)
1011{
1012 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1013
1014 if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
1015 QDIO_DBF_TEXT2(1, setup, "eq:ckcon");
1016 goto error;
1017 }
1018
1019 if (!(dstat & DEV_STAT_DEV_END)) {
1020 QDIO_DBF_TEXT2(1, setup, "eq:no de");
1021 goto error;
1022 }
1023
1024 if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
1025 QDIO_DBF_TEXT2(1, setup, "eq:badio");
1026 goto error;
1027 }
1028 return 0;
1029error:
1030 QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));
1031 QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
1032 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1033 return 1;
1034}
1035
/* first interrupt after establish: validate status and update irq state */
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	char dbf_text[15];

	sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);
	QDIO_DBF_TEXT0(0, trace, dbf_text);

	if (!qdio_establish_check_errors(cdev, cstat, dstat))
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
}
1049
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;
	char dbf_text[15];

	qdio_perf_stat_inc(&perf_stats.qdio_int);

	/* not (or no longer) a qdio subchannel: just trace and ignore */
	if (!intparm || !irq_ptr) {
		sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
		return;
	}

	/* cio may pass an error pointer in place of the irb */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			sprintf(dbf_text, "ierr%4x",
				cdev->private->schid.sch_no);
			QDIO_DBF_TEXT2(1, setup, dbf_text);
			qdio_int_error(cdev);
			return;
		case -ETIMEDOUT:
			sprintf(dbf_text, "qtoh%4x",
				cdev->private->schid.sch_no);
			QDIO_DBF_TEXT2(1, setup, dbf_text);
			qdio_int_error(cdev);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr->schid, irb);

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;

	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;

	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			/* no state change so no need to wake up wait_q */
			return;
		}
		if ((cstat & ~SCHN_STAT_PCI) || dstat) {
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
			break;
		}
		/*
		 * NOTE(review): a clean interrupt (no PCI, no error bits)
		 * falls through to the WARN_ON below — confirm intended.
		 */
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
1116
1117/**
1118 * qdio_get_ssqd_desc - get qdio subchannel description
1119 * @cdev: ccw device to get description for
1120 *
1121 * Returns a pointer to the saved qdio subchannel description,
1122 * or NULL for not setup qdio devices.
1123 */
1124struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev)
1125{
1126 struct qdio_irq *irq_ptr;
1127
1128 QDIO_DBF_TEXT0(0, setup, "getssqd");
1129
1130 irq_ptr = cdev->private->qdio_data;
1131 if (!irq_ptr)
1132 return NULL;
1133
1134 return &irq_ptr->ssqd_desc;
1135}
1136EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
1137
/**
 * qdio_cleanup - shutdown queues and free data structures
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * This function calls qdio_shutdown() for @cdev with method @how
 * and on success qdio_free() for @cdev.
 * Returns 0 on success, -ENODEV if @cdev is not set up for qdio,
 * or the error from qdio_shutdown()/qdio_free().
 */
int qdio_cleanup(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr;
	char dbf_text[15];
	int rc;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	sprintf(dbf_text, "qcln%4x", irq_ptr->schid.sch_no);
	QDIO_DBF_TEXT1(0, trace, dbf_text);
	QDIO_DBF_TEXT0(0, setup, dbf_text);

	rc = qdio_shutdown(cdev, how);
	if (rc == 0)
		rc = qdio_free(cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_cleanup);
1166
1167static void qdio_shutdown_queues(struct ccw_device *cdev)
1168{
1169 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1170 struct qdio_q *q;
1171 int i;
1172
1173 for_each_input_queue(irq_ptr, q, i)
1174 tasklet_disable(&q->tasklet);
1175
1176 for_each_output_queue(irq_ptr, q, i) {
1177 tasklet_disable(&q->tasklet);
1178 del_timer(&q->u.out.timer);
1179 }
1180}
1181
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * Stops the queue tasklets, clears or halts the subchannel and waits
 * (up to 10s) for the interrupt handler to confirm the cleanup, then
 * restores the original interrupt handler and drops the module
 * reference taken in qdio_establish().
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long flags;
	char dbf_text[15];

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	sprintf(dbf_text, "qsqs%4x", irq_ptr->schid.sch_no);
	QDIO_DBF_TEXT1(0, trace, dbf_text);
	QDIO_DBF_TEXT0(0, setup, dbf_text);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT0(0, setup, dbf_text);
		sprintf(dbf_text, "rc=%d", rc);
		QDIO_DBF_TEXT0(0, setup, dbf_text);
		/* clear/halt failed: skip the wait, proceed with teardown */
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	/* drop the ccwdev lock while sleeping; re-taken below */
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	module_put(THIS_MODULE);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
1256
1257/**
1258 * qdio_free - free data structures for a qdio subchannel
1259 * @cdev: associated ccw device
1260 */
1261int qdio_free(struct ccw_device *cdev)
1262{
1263 struct qdio_irq *irq_ptr;
1264 char dbf_text[15];
1265
1266 irq_ptr = cdev->private->qdio_data;
1267 if (!irq_ptr)
1268 return -ENODEV;
1269
1270 mutex_lock(&irq_ptr->setup_mutex);
1271
1272 sprintf(dbf_text, "qfqs%4x", irq_ptr->schid.sch_no);
1273 QDIO_DBF_TEXT1(0, trace, dbf_text);
1274 QDIO_DBF_TEXT0(0, setup, dbf_text);
1275
1276 cdev->private->qdio_data = NULL;
1277 mutex_unlock(&irq_ptr->setup_mutex);
1278
1279 qdio_release_memory(irq_ptr);
1280 return 0;
1281}
1282EXPORT_SYMBOL_GPL(qdio_free);
1283
1284/**
1285 * qdio_initialize - allocate and establish queues for a qdio subchannel
1286 * @init_data: initialization data
1287 *
1288 * This function first allocates queues via qdio_allocate() and on success
1289 * establishes them via qdio_establish().
1290 */
1291int qdio_initialize(struct qdio_initialize *init_data)
1292{
1293 int rc;
1294 char dbf_text[15];
1295
1296 sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no);
1297 QDIO_DBF_TEXT0(0, setup, dbf_text);
1298 QDIO_DBF_TEXT0(0, trace, dbf_text);
1299
1300 rc = qdio_allocate(init_data);
1301 if (rc)
1302 return rc;
1303
1304 rc = qdio_establish(init_data);
1305 if (rc)
1306 qdio_free(init_data->cdev);
1307 return rc;
1308}
1309EXPORT_SYMBOL_GPL(qdio_initialize);
1310
1311/**
1312 * qdio_allocate - allocate qdio queues and associated data
1313 * @init_data: initialization data
1314 */
1315int qdio_allocate(struct qdio_initialize *init_data)
1316{
1317 struct qdio_irq *irq_ptr;
1318 char dbf_text[15];
1319
1320 sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no);
1321 QDIO_DBF_TEXT0(0, setup, dbf_text);
1322 QDIO_DBF_TEXT0(0, trace, dbf_text);
1323
1324 if ((init_data->no_input_qs && !init_data->input_handler) ||
1325 (init_data->no_output_qs && !init_data->output_handler))
1326 return -EINVAL;
1327
1328 if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
1329 (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
1330 return -EINVAL;
1331
1332 if ((!init_data->input_sbal_addr_array) ||
1333 (!init_data->output_sbal_addr_array))
1334 return -EINVAL;
1335
1336 qdio_allocate_do_dbf(init_data);
1337
1338 /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
1339 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1340 if (!irq_ptr)
1341 goto out_err;
1342 QDIO_DBF_TEXT0(0, setup, "irq_ptr:");
1343 QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *));
1344
1345 mutex_init(&irq_ptr->setup_mutex);
1346
1347 /*
1348 * Allocate a page for the chsc calls in qdio_establish.
1349 * Must be pre-allocated since a zfcp recovery will call
1350 * qdio_establish. In case of low memory and swap on a zfcp disk
1351 * we may not be able to allocate memory otherwise.
1352 */
1353 irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
1354 if (!irq_ptr->chsc_page)
1355 goto out_rel;
1356
1357 /* qdr is used in ccw1.cda which is u32 */
1358 irq_ptr->qdr = kzalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
1359 if (!irq_ptr->qdr)
1360 goto out_rel;
1361 WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
1362
1363 QDIO_DBF_TEXT0(0, setup, "qdr:");
1364 QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *));
1365
1366 if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
1367 init_data->no_output_qs))
1368 goto out_rel;
1369
1370 init_data->cdev->private->qdio_data = irq_ptr;
1371 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1372 return 0;
1373out_rel:
1374 qdio_release_memory(irq_ptr);
1375out_err:
1376 return -ENOMEM;
1377}
1378EXPORT_SYMBOL_GPL(qdio_allocate);
1379
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 *
 * Issues the establish-queues ccw and waits (up to 1s) for the interrupt
 * handler to move the device to the ESTABLISHED state. On any failure the
 * subchannel is shut down again using "clear".
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	char dbf_text[20];
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	/* pin the module; the reference is dropped in qdio_shutdown() */
	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);
	QDIO_DBF_TEXT0(0, trace, dbf_text);

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
		sprintf(dbf_text, "eq:rc%4x", rc);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* the interrupt handler advances irq_ptr->state; time out after 1s */
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac);
	QDIO_DBF_TEXT2(0, setup, dbf_text);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
1463
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 *
 * Issues the activate-queues ccw and waits briefly for the subchannel to
 * become active. Requires a prior successful qdio_establish(). Returns 0
 * on success, negative errno otherwise.
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;
	char dbf_text[20];

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	/* device was not established (or was already shut down again) */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	sprintf(dbf_text, "qact%4x", irq_ptr->schid.sch_no);
	QDIO_DBF_TEXT2(0, setup, dbf_text);
	QDIO_DBF_TEXT2(0, trace, dbf_text);

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
		sprintf(dbf_text, "aq:rc%4x", rc);
		QDIO_DBF_TEXT2(1, setup, dbf_text);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		/* activation failed: shut the subchannel down again */
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
1534
1535static inline int buf_in_between(int bufnr, int start, int count)
1536{
1537 int end = add_buf(start, count);
1538
1539 if (end > start) {
1540 if (bufnr >= start && bufnr < end)
1541 return 1;
1542 else
1543 return 0;
1544 }
1545
1546 /* wrap-around case */
1547 if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
1548 (bufnr < end))
1549 return 1;
1550 else
1551 return 0;
1552}
1553
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 *
 * Returns the emptied buffers to the adapter (state INPUT_EMPTY) and
 * issues a siga-input only when the adapter had no free buffers left.
 */
static void handle_inbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned long flags;
	int used, rc;

	/*
	 * do_QDIO could run in parallel with the queue tasklet so the
	 * upper-layer programm could empty the ACK'ed buffer here.
	 * If that happens we must clear the polling flag, otherwise
	 * qdio_stop_polling() could set the buffer to NOT_INIT after
	 * it was set to EMPTY which would kill us.
	 */
	spin_lock_irqsave(&q->u.in.lock, flags);
	if (q->u.in.polling)
		if (buf_in_between(q->last_move_ftc, bufnr, count))
			q->u.in.polling = 0;

	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	spin_unlock_irqrestore(&q->u.in.lock, flags);

	/* used = buffers the adapter held *before* this call returned some */
	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return;

	if (need_siga_in(q)) {
		rc = qdio_siga_input(q);
		if (rc)
			q->qdio_error = rc;
	}
}
1595
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 *
 * Marks the buffers OUTPUT_PRIMED, signals the adapter as required by the
 * queue format, then schedules the outbound tasklet to reap completions.
 */
static void handle_outbound(struct qdio_q *q, unsigned int callflags,
			    int bufnr, int count)
{
	unsigned char state;
	int used;

	qdio_perf_stat_inc(&perf_stats.outbound_handler);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	/* remember whether the caller requested a PCI interrupt */
	if (callflags & QDIO_FLAG_PCI_OUT)
		q->u.out.pci_out_enabled = 1;
	else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		if (multicast_outbound(q))
			qdio_kick_outbound_q(q);
		else
			/*
			 * One siga-w per buffer required for unicast
			 * HiperSockets.
			 */
			while (count--)
				qdio_kick_outbound_q(q);
		goto out;
	}

	if (need_siga_sync(q)) {
		qdio_siga_sync_q(q);
		goto out;
	}

	/* try to fast requeue buffers */
	get_buf_state(q, prev_buf(bufnr), &state);
	if (state != SLSB_CU_OUTPUT_PRIMED)
		qdio_kick_outbound_q(q);
	else {
		QDIO_DBF_TEXT5(0, trace, "fast-req");
		qdio_perf_stat_inc(&perf_stats.fast_requeue);
	}
out:
	/* Fixme: could wait forever if called from process context */
	tasklet_schedule(&q->tasklet);
}
1650
1651/**
1652 * do_QDIO - process input or output buffers
1653 * @cdev: associated ccw_device for the qdio subchannel
1654 * @callflags: input or output and special flags from the program
1655 * @q_nr: queue number
1656 * @bufnr: buffer number
1657 * @count: how many buffers to process
1658 */
1659int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1660 int q_nr, int bufnr, int count)
1661{
1662 struct qdio_irq *irq_ptr;
1663#ifdef CONFIG_QDIO_DEBUG
1664 char dbf_text[20];
1665
1666 sprintf(dbf_text, "doQD%04x", cdev->private->schid.sch_no);
1667 QDIO_DBF_TEXT3(0, trace, dbf_text);
1668#endif /* CONFIG_QDIO_DEBUG */
1669
1670 if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
1671 (count > QDIO_MAX_BUFFERS_PER_Q) ||
1672 (q_nr > QDIO_MAX_QUEUES_PER_IRQ))
1673 return -EINVAL;
1674
1675 if (!count)
1676 return 0;
1677
1678 irq_ptr = cdev->private->qdio_data;
1679 if (!irq_ptr)
1680 return -ENODEV;
1681
1682#ifdef CONFIG_QDIO_DEBUG
1683 if (callflags & QDIO_FLAG_SYNC_INPUT)
1684 QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr],
1685 sizeof(void *));
1686 else
1687 QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr],
1688 sizeof(void *));
1689
1690 sprintf(dbf_text, "flag%04x", callflags);
1691 QDIO_DBF_TEXT3(0, trace, dbf_text);
1692 sprintf(dbf_text, "qi%02xct%02x", bufnr, count);
1693 QDIO_DBF_TEXT3(0, trace, dbf_text);
1694#endif /* CONFIG_QDIO_DEBUG */
1695
1696 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1697 return -EBUSY;
1698
1699 if (callflags & QDIO_FLAG_SYNC_INPUT)
1700 handle_inbound(irq_ptr->input_qs[q_nr],
1701 callflags, bufnr, count);
1702 else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
1703 handle_outbound(irq_ptr->output_qs[q_nr],
1704 callflags, bufnr, count);
1705 else {
1706 QDIO_DBF_TEXT3(1, trace, "doQD:inv");
1707 return -EINVAL;
1708 }
1709 return 0;
1710}
1711EXPORT_SYMBOL_GPL(do_QDIO);
1712
1713static int __init init_QDIO(void)
1714{
1715 int rc;
1716
1717 rc = qdio_setup_init();
1718 if (rc)
1719 return rc;
1720 rc = tiqdio_allocate_memory();
1721 if (rc)
1722 goto out_cache;
1723 rc = qdio_debug_init();
1724 if (rc)
1725 goto out_ti;
1726 rc = qdio_setup_perf_stats();
1727 if (rc)
1728 goto out_debug;
1729 rc = tiqdio_register_thinints();
1730 if (rc)
1731 goto out_perf;
1732 return 0;
1733
1734out_perf:
1735 qdio_remove_perf_stats();
1736out_debug:
1737 qdio_debug_exit();
1738out_ti:
1739 tiqdio_free_memory();
1740out_cache:
1741 qdio_setup_exit();
1742 return rc;
1743}
1744
/* module exit: undo init_QDIO in reverse order */
static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_remove_perf_stats();
	qdio_debug_exit();
	qdio_setup_exit();
}
1753
/* module entry and exit points */
module_init(init_QDIO);
module_exit(exit_QDIO);
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
new file mode 100644
index 000000000000..ea01b85b1cc9
--- /dev/null
+++ b/drivers/s390/cio/qdio_perf.c
@@ -0,0 +1,151 @@
1/*
2 * drivers/s390/cio/qdio_perf.c
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#include <linux/kernel.h>
9#include <linux/proc_fs.h>
10#include <linux/seq_file.h>
11#include <asm/ccwdev.h>
12
13#include "cio.h"
14#include "css.h"
15#include "device.h"
16#include "ioasm.h"
17#include "chsc.h"
18#include "qdio_debug.h"
19#include "qdio_perf.h"
20
21int qdio_performance_stats;
22struct qdio_perf_stats perf_stats;
23
24#ifdef CONFIG_PROC_FS
25static struct proc_dir_entry *qdio_perf_pde;
26#endif
27
28inline void qdio_perf_stat_inc(atomic_long_t *count)
29{
30 if (qdio_performance_stats)
31 atomic_long_inc(count);
32}
33
34inline void qdio_perf_stat_dec(atomic_long_t *count)
35{
36 if (qdio_performance_stats)
37 atomic_long_dec(count);
38}
39
/*
 * procfs functions
 */

/* render all counters from perf_stats into /proc/qdio_perf */
static int qdio_perf_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.qdio_int));
	seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.pci_int));
	seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.thin_int));
	seq_printf(m, "\n");
	seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.tasklet_inbound));
	seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.tasklet_outbound));
	seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
		   (long)atomic_long_read(&perf_stats.tasklet_thinint),
		   (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
	seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
		   (long)atomic_long_read(&perf_stats.thinint_inbound),
		   (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
	seq_printf(m, "\n");
	seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.siga_in));
	seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.siga_out));
	seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.siga_sync));
	seq_printf(m, "\n");
	seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.inbound_handler));
	seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.outbound_handler));
	seq_printf(m, "\n");
	seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
		   (long)atomic_long_read(&perf_stats.fast_requeue));
	seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
		   (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
	seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
		   (long)atomic_long_read(&perf_stats.debug_stop_polling));
	seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
		   (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
	seq_printf(m, "\n");
	return 0;
}
/* seq_file open hook: the whole report fits in a single show call */
static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, qdio_perf_proc_show, NULL);
}
90
91static struct file_operations qdio_perf_proc_fops = {
92 .owner = THIS_MODULE,
93 .open = qdio_perf_seq_open,
94 .read = seq_read,
95 .llseek = seq_lseek,
96 .release = single_release,
97};
98
99/*
100 * sysfs functions
101 */
102static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf)
103{
104 return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
105}
106
107static ssize_t qdio_perf_stats_store(struct bus_type *bus,
108 const char *buf, size_t count)
109{
110 unsigned long i;
111
112 if (strict_strtoul(buf, 16, &i) != 0)
113 return -EINVAL;
114 if ((i != 0) && (i != 1))
115 return -EINVAL;
116 if (i == qdio_performance_stats)
117 return count;
118
119 qdio_performance_stats = i;
120 /* reset performance statistics */
121 if (i == 0)
122 memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
123 return count;
124}
125
126static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show,
127 qdio_perf_stats_store);
128
/* create the sysfs toggle and, with CONFIG_PROC_FS, the /proc report file */
int __init qdio_setup_perf_stats(void)
{
	int rc;

	rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
	if (rc)
		return rc;

#ifdef CONFIG_PROC_FS
	memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
	/* NOTE(review): proc_create() failure is silently ignored — confirm intended */
	qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO,
				    NULL, &qdio_perf_proc_fops);
#endif
	return 0;
}
144
/* tear down what qdio_setup_perf_stats() created */
void __exit qdio_remove_perf_stats(void)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("qdio_perf", NULL);
#endif
	bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
}
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
new file mode 100644
index 000000000000..5c406a8b7387
--- /dev/null
+++ b/drivers/s390/cio/qdio_perf.h
@@ -0,0 +1,54 @@
1/*
2 * drivers/s390/cio/qdio_perf.h
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#ifndef QDIO_PERF_H
9#define QDIO_PERF_H
10
11#include <linux/types.h>
12#include <linux/device.h>
13#include <asm/atomic.h>
14
/* global counters, updated from hot paths via qdio_perf_stat_inc/_dec */
struct qdio_perf_stats {
	/* interrupt handler calls */
	atomic_long_t qdio_int;
	atomic_long_t pci_int;
	atomic_long_t thin_int;

	/* tasklet runs */
	atomic_long_t tasklet_inbound;
	atomic_long_t tasklet_outbound;
	atomic_long_t tasklet_thinint;
	atomic_long_t tasklet_thinint_loop;
	atomic_long_t thinint_inbound;
	atomic_long_t thinint_inbound_loop;
	atomic_long_t thinint_inbound_loop2;

	/* signal adapter calls */
	atomic_long_t siga_out;
	atomic_long_t siga_in;
	atomic_long_t siga_sync;

	/* misc */
	atomic_long_t inbound_handler;
	atomic_long_t outbound_handler;
	atomic_long_t fast_requeue;

	/* for debugging */
	atomic_long_t debug_tl_out_timer;
	atomic_long_t debug_stop_polling;
};
44
45extern struct qdio_perf_stats perf_stats;
46extern int qdio_performance_stats;
47
48int qdio_setup_perf_stats(void);
49void qdio_remove_perf_stats(void);
50
51extern void qdio_perf_stat_inc(atomic_long_t *count);
52extern void qdio_perf_stat_dec(atomic_long_t *count);
53
54#endif
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
new file mode 100644
index 000000000000..f0923a8aceda
--- /dev/null
+++ b/drivers/s390/cio/qdio_setup.c
@@ -0,0 +1,521 @@
1/*
2 * driver/s390/cio/qdio_setup.c
3 *
4 * qdio queue initialization
5 *
6 * Copyright (C) IBM Corp. 2008
7 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
8 */
9#include <linux/kernel.h>
10#include <linux/slab.h>
11#include <asm/qdio.h>
12
13#include "cio.h"
14#include "css.h"
15#include "device.h"
16#include "ioasm.h"
17#include "chsc.h"
18#include "qdio.h"
19#include "qdio_debug.h"
20
/* slab cache for struct qdio_q; allocations are checked for 256-byte alignment */
static struct kmem_cache *qdio_q_cache;
22
/*
 * qebsm is only available under 64bit but the adapter sets the feature
 * flag anyway, so we manually override it.
 */
static inline int qebsm_possible(void)
{
#ifdef CONFIG_64BIT
	return css_general_characteristics.qebsm;
#else
	return 0;
#endif
}
34
35/*
36 * qib_param_field: pointer to 128 bytes or NULL, if no param field
37 * nr_input_qs: pointer to nr_queues*128 words of data or NULL
38 */
39static void set_impl_params(struct qdio_irq *irq_ptr,
40 unsigned int qib_param_field_format,
41 unsigned char *qib_param_field,
42 unsigned long *input_slib_elements,
43 unsigned long *output_slib_elements)
44{
45 struct qdio_q *q;
46 int i, j;
47
48 if (!irq_ptr)
49 return;
50
51 WARN_ON((unsigned long)&irq_ptr->qib & 0xff);
52 irq_ptr->qib.pfmt = qib_param_field_format;
53 if (qib_param_field)
54 memcpy(irq_ptr->qib.parm, qib_param_field,
55 QDIO_MAX_BUFFERS_PER_Q);
56
57 if (!input_slib_elements)
58 goto output;
59
60 for_each_input_queue(irq_ptr, q, i) {
61 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
62 q->slib->slibe[j].parms =
63 input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
64 }
65output:
66 if (!output_slib_elements)
67 return;
68
69 for_each_output_queue(irq_ptr, q, i) {
70 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
71 q->slib->slibe[j].parms =
72 output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
73 }
74}
75
76static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
77{
78 struct qdio_q *q;
79 int i;
80
81 for (i = 0; i < nr_queues; i++) {
82 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
83 if (!q)
84 return -ENOMEM;
85 WARN_ON((unsigned long)q & 0xff);
86
87 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
88 if (!q->slib) {
89 kmem_cache_free(qdio_q_cache, q);
90 return -ENOMEM;
91 }
92 WARN_ON((unsigned long)q->slib & 0x7ff);
93 irq_ptr_qs[i] = q;
94 }
95 return 0;
96}
97
98int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
99{
100 int rc;
101
102 rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
103 if (rc)
104 return rc;
105 rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
106 return rc;
107}
108
/* reset the per-establish queue state and set identity fields */
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
			      qdio_handler_t *handler, int i)
{
	/*
	 * must be cleared by every qdio_establish: zero everything from the
	 * start of the struct up to (but not including) the slib pointer,
	 * so the storage-list pointers set at allocation time survive
	 */
	memset(q, 0, ((char *)&q->slib) - ((char *)q));
	memset(q->slib, 0, PAGE_SIZE);

	q->irq_ptr = irq_ptr;
	q->mask = 1 << (31 - i);	/* per-queue bit, MSB first */
	q->nr = i;
	q->handler = handler;
}
121
/* wire up the storage list (sl), slib chain and sbal pointers of one queue */
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
				void **sbals_array, char *dbf_text, int i)
{
	struct qdio_q *prev;
	int j;

	QDIO_DBF_TEXT0(0, setup, dbf_text);
	QDIO_DBF_HEX0(0, setup, &q, sizeof(void *));

	/* sl lives in the second half of the slib page */
	q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);

	/* fill in sbal */
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
		q->sbal[j] = *sbals_array++;
		WARN_ON((unsigned long)q->sbal[j] & 0xff);
	}

	/* fill in slib: chain the previous queue's slib to this one */
	if (i > 0) {
		prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
			: irq_ptr->output_qs[i - 1];
		prev->slib->nsliba = (unsigned long)q->slib;
	}

	q->slib->sla = (unsigned long)q->sl;
	q->slib->slsba = (unsigned long)&q->slsb.val[0];

	/* fill in sl */
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
		q->sl->element[j].sbal = (unsigned long)q->sbal[j];

	QDIO_DBF_TEXT2(0, setup, "sl-sb-b0");
	QDIO_DBF_HEX2(0, setup, q->sl, sizeof(void *));
	QDIO_DBF_HEX2(0, setup, &q->slsb, sizeof(void *));
	QDIO_DBF_HEX2(0, setup, q->sbal, sizeof(void *));
}
158
/* initialize all input and output queues from the qdio_initialize data */
static void setup_queues(struct qdio_irq *irq_ptr,
			 struct qdio_initialize *qdio_init)
{
	char dbf_text[20];
	struct qdio_q *q;
	void **input_sbal_array = qdio_init->input_sbal_addr_array;
	void **output_sbal_array = qdio_init->output_sbal_addr_array;
	int i;

	sprintf(dbf_text, "qfqs%4x", qdio_init->cdev->private->schid.sch_no);
	QDIO_DBF_TEXT0(0, setup, dbf_text);

	for_each_input_queue(irq_ptr, q, i) {
		sprintf(dbf_text, "in-q%4x", i);
		setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);

		q->is_input_q = 1;
		spin_lock_init(&q->u.in.lock);
		setup_storage_lists(q, irq_ptr, input_sbal_array, dbf_text, i);
		input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

		/* thinint devices use the adapter-interrupt inbound tasklet */
		if (is_thinint_irq(irq_ptr))
			tasklet_init(&q->tasklet, tiqdio_inbound_processing,
				     (unsigned long) q);
		else
			tasklet_init(&q->tasklet, qdio_inbound_processing,
				     (unsigned long) q);
	}

	for_each_output_queue(irq_ptr, q, i) {
		sprintf(dbf_text, "outq%4x", i);
		setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);

		q->is_input_q = 0;
		setup_storage_lists(q, irq_ptr, output_sbal_array,
				    dbf_text, i);
		output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

		tasklet_init(&q->tasklet, qdio_outbound_processing,
			     (unsigned long) q);
		setup_timer(&q->u.out.timer, (void(*)(unsigned long))
			    &qdio_outbound_timer, (unsigned long)q);
	}
}
203
/*
 * Translate the qdioac characteristics byte into the device's siga flags.
 * Flags are only set, never cleared, so the siga_flag fields are assumed
 * to start out zeroed.
 */
static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
{
	if (qdioac & AC1_SIGA_INPUT_NEEDED)
		irq_ptr->siga_flag.input = 1;
	if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
		irq_ptr->siga_flag.output = 1;
	if (qdioac & AC1_SIGA_SYNC_NEEDED)
		irq_ptr->siga_flag.sync = 1;
	if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT)
		irq_ptr->siga_flag.no_sync_ti = 1;
	if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI)
		irq_ptr->siga_flag.no_sync_out_pci = 1;

	/* both auto-sync conditions together imply no_sync_out_ti */
	if (irq_ptr->siga_flag.no_sync_out_pci &&
	    irq_ptr->siga_flag.no_sync_ti)
		irq_ptr->siga_flag.no_sync_out_ti = 1;
}
221
/*
 * Keep QEBSM enabled (storing the subchannel token) only when it was
 * requested via the qib rflags AND the adapter reports it available and
 * enabled; otherwise clear the request and the token.
 */
static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
				  unsigned char qdioac, unsigned long token)
{
	char dbf_text[15];

	if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
		goto no_qebsm;
	if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
	    (!(qdioac & AC1_SC_QEBSM_ENABLED)))
		goto no_qebsm;

	irq_ptr->sch_token = token;

	QDIO_DBF_TEXT0(0, setup, "V=V:1");
	sprintf(dbf_text, "%8lx", irq_ptr->sch_token);
	QDIO_DBF_TEXT0(0, setup, dbf_text);
	return;

no_qebsm:
	irq_ptr->sch_token = 0;
	irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
	QDIO_DBF_TEXT0(0, setup, "noV=V");
}
245
/*
 * Issue CHSC command 0x0024 (store subchannel QDIO data) for this
 * subchannel and cache the result in irq_ptr->ssqd_desc.
 * Returns 0 on success, -EIO on chsc failure, -EINVAL on an unusable
 * response, or the error derived from the chsc response code.
 */
static int __get_ssqd_info(struct qdio_irq *irq_ptr)
{
	struct chsc_ssqd_area *ssqd;
	int rc;

	QDIO_DBF_TEXT0(0, setup, "getssqd");
	/* reuse the pre-allocated chsc page as request/response area */
	ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
	memset(ssqd, 0, PAGE_SIZE);

	ssqd->request = (struct chsc_header) {
		.length = 0x0010,
		.code = 0x0024,
	};
	ssqd->first_sch = irq_ptr->schid.sch_no;
	ssqd->last_sch = irq_ptr->schid.sch_no;
	ssqd->ssid = irq_ptr->schid.ssid;

	if (chsc(ssqd))
		return -EIO;
	rc = chsc_error_from_response(ssqd->response.code);
	if (rc)
		return rc;

	/* response must be valid, QDIO-capable and for this subchannel */
	if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
	    !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
	    (ssqd->qdio_ssqd.sch != irq_ptr->schid.sch_no))
		return -EINVAL;

	memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
	       sizeof(struct qdio_ssqd_desc));
	return 0;
}
278
/* query SSQD and derive qebsm/siga settings; assume worst case on failure */
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
{
	unsigned char qdioac;
	char dbf_text[15];
	int rc;

	rc = __get_ssqd_info(irq_ptr);
	if (rc) {
		QDIO_DBF_TEXT2(0, setup, "ssqdasig");
		sprintf(dbf_text, "schno%x", irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT2(0, setup, dbf_text);
		sprintf(dbf_text, "rc:%d", rc);
		QDIO_DBF_TEXT2(0, setup, dbf_text);
		/* all flags set, worst case */
		qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
			 AC1_SIGA_SYNC_NEEDED;
	} else
		qdioac = irq_ptr->ssqd_desc.qdioac1;

	check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
	process_ac_flags(irq_ptr, qdioac);

	sprintf(dbf_text, "qdioac%2x", qdioac);
	QDIO_DBF_TEXT2(0, setup, dbf_text);
}
304
305void qdio_release_memory(struct qdio_irq *irq_ptr)
306{
 /*
  * Free everything that was allocated for this qdio instance: the
  * per-queue structures (each with its slib page), the qdr, the chsc
  * scratch page and finally the irq structure itself.  Safe to call
  * on a partially set-up irq_ptr (NULL queue slots are skipped).
  */
307 struct qdio_q *q;
308 int i;
309
310 /*
311  * Must check queue array manually since irq_ptr->nr_input_queues /
312  * irq_ptr->nr_output_queues may not yet be set.
313  */
314 for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
315  q = irq_ptr->input_qs[i];
316  if (q) {
317   free_page((unsigned long) q->slib);
318   kmem_cache_free(qdio_q_cache, q);
319  }
320 }
321 for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
322  q = irq_ptr->output_qs[i];
323  if (q) {
324   free_page((unsigned long) q->slib);
325   kmem_cache_free(qdio_q_cache, q);
326  }
327 }
328 kfree(irq_ptr->qdr);
329 free_page(irq_ptr->chsc_page);
330 free_page((unsigned long) irq_ptr);
331}
332
333static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
334         struct qdio_q **irq_ptr_qs,
335         int i, int nr)
336{
 /*
  * Fill one queue-descriptor entry (qdesfmt0) in the qdr for queue i
  * of the given queue array.  @nr is the offset into qdf0[]: 0 for
  * input queues, no_input_qs for output queues, so both queue sets
  * share a single descriptor array.
  */
337 irq_ptr->qdr->qdf0[i + nr].sliba =
338  (unsigned long)irq_ptr_qs[i]->slib;
339
340 irq_ptr->qdr->qdf0[i + nr].sla =
341  (unsigned long)irq_ptr_qs[i]->sl;
342
343 irq_ptr->qdr->qdf0[i + nr].slsba =
344  (unsigned long)&irq_ptr_qs[i]->slsb.val[0];
345
 /* all storage keys default to the page default key */
346 irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY;
347 irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY;
348 irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY;
349 irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY;
350}
351
352static void setup_qdr(struct qdio_irq *irq_ptr,
353        struct qdio_initialize *qdio_init)
354{
 /*
  * Build the queue description record (qdr) from the caller-supplied
  * initialization data: header fields plus one qdesfmt0 entry per
  * input and per output queue (output entries follow the input ones).
  */
355 int i;
356
357 irq_ptr->qdr->qfmt = qdio_init->q_format;
358 irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
359 irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
360 irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
361 irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
362 irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
363 irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY;
364
365 for (i = 0; i < qdio_init->no_input_qs; i++)
366  __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
367
 /* output descriptors start right after the input descriptors */
368 for (i = 0; i < qdio_init->no_output_qs; i++)
369  __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
370      qdio_init->no_input_qs);
371}
372
373static void setup_qib(struct qdio_irq *irq_ptr,
374        struct qdio_initialize *init_data)
375{
 /*
  * Initialize the queue information block: request QEBSM if the
  * channel subsystem supports it, record the queue format, the slib
  * addresses of the first input/output queue (if any) and the
  * EBCDIC adapter name.
  */
376 if (qebsm_possible())
377  irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
378
379 irq_ptr->qib.qfmt = init_data->q_format;
380 if (init_data->no_input_qs)
381  irq_ptr->qib.isliba =
382   (unsigned long)(irq_ptr->input_qs[0]->slib);
383 if (init_data->no_output_qs)
384  irq_ptr->qib.osliba =
385   (unsigned long)(irq_ptr->output_qs[0]->slib);
386 memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
387}
388
389int qdio_setup_irq(struct qdio_initialize *init_data)
390{
 /*
  * Set up a qdio irq instance from the caller's initialization data:
  * wipe the volatile part of the structure, set up queues, qib,
  * thinint indicator and qdr, fetch the establish/activate CIWs from
  * the ccw device and install the qdio interrupt handler.
  *
  * Returns 0 on success or -EINVAL (after releasing all memory) if
  * the required CIWs are missing.  NOTE(review): on the error path
  * the original cdev->handler has already been left untouched, but
  * irq_ptr memory is freed — callers must not reuse it.
  */
391 struct ciw *ciw;
392 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
393 int rc;
394
 /* clear only up to (not including) the qdr pointer — the members
    after it survive re-establishment */
395 memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr));
396 /* wipes qib.ac, required by ar7063 */
397 memset(irq_ptr->qdr, 0, sizeof(struct qdr));
398
399 irq_ptr->int_parm = init_data->int_parm;
400 irq_ptr->nr_input_qs = init_data->no_input_qs;
401 irq_ptr->nr_output_qs = init_data->no_output_qs;
402
403 irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
404 irq_ptr->cdev = init_data->cdev;
405 setup_queues(irq_ptr, init_data);
406
407 setup_qib(irq_ptr, init_data);
408 qdio_setup_thinint(irq_ptr);
409 set_impl_params(irq_ptr, init_data->qib_param_field_format,
410   init_data->qib_param_field,
411   init_data->input_slib_elements,
412   init_data->output_slib_elements);
413
414 /* fill input and output descriptors */
415 setup_qdr(irq_ptr, init_data);
416
417 /* qdr, qib, sls, slsbs, slibs, sbales are filled now */
418
419 /* get qdio commands */
420 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
421 if (!ciw) {
422  QDIO_DBF_TEXT2(1, setup, "no eq");
423  rc = -EINVAL;
424  goto out_err;
425 }
426 irq_ptr->equeue = *ciw;
427
428 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
429 if (!ciw) {
430  QDIO_DBF_TEXT2(1, setup, "no aq");
431  rc = -EINVAL;
432  goto out_err;
433 }
434 irq_ptr->aqueue = *ciw;
435
436 /* set new interrupt handler */
437 irq_ptr->orig_handler = init_data->cdev->handler;
438 init_data->cdev->handler = qdio_int_handler;
439 return 0;
440out_err:
441 qdio_release_memory(irq_ptr);
442 return rc;
443}
444
445void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
446    struct ccw_device *cdev)
447{
 /*
  * Log a two-line human-readable summary of the subchannel's qdio
  * capabilities (queue format, AI/QEBSM/PCI/TDD availability) and the
  * SIGA instructions it requires.  Informational only — no state is
  * changed.  The "no" prefix pattern below prints e.g. "noQEBSM "
  * when a feature is absent.
  */
448 char s[80];
449
450 sprintf(s, "%s ", cdev->dev.bus_id);
451
452 switch (irq_ptr->qib.qfmt) {
453 case QDIO_QETH_QFMT:
454  sprintf(s + strlen(s), "OSADE ");
455  break;
456 case QDIO_ZFCP_QFMT:
457  sprintf(s + strlen(s), "ZFCP ");
458  break;
459 case QDIO_IQDIO_QFMT:
460  sprintf(s + strlen(s), "HiperSockets ");
461  break;
462 }
463 sprintf(s + strlen(s), "using: ");
464
465 if (!is_thinint_irq(irq_ptr))
466  sprintf(s + strlen(s), "no");
467 sprintf(s + strlen(s), "AdapterInterrupts ");
468 if (!(irq_ptr->sch_token != 0))
469  sprintf(s + strlen(s), "no");
470 sprintf(s + strlen(s), "QEBSM ");
471 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
472  sprintf(s + strlen(s), "no");
473 sprintf(s + strlen(s), "OutboundPCI ");
474 if (!css_general_characteristics.aif_tdd)
475  sprintf(s + strlen(s), "no");
476 sprintf(s + strlen(s), "TDD\n");
477 printk(KERN_INFO "qdio: %s", s);
478
 /* second line: which SIGA operations the adapter needs */
479 memset(s, 0, sizeof(s));
480 sprintf(s, "%s SIGA required: ", cdev->dev.bus_id);
481 if (irq_ptr->siga_flag.input)
482  sprintf(s + strlen(s), "Read ");
483 if (irq_ptr->siga_flag.output)
484  sprintf(s + strlen(s), "Write ");
485 if (irq_ptr->siga_flag.sync)
486  sprintf(s + strlen(s), "Sync ");
487 if (!irq_ptr->siga_flag.no_sync_ti)
488  sprintf(s + strlen(s), "SyncAI ");
489 if (!irq_ptr->siga_flag.no_sync_out_ti)
490  sprintf(s + strlen(s), "SyncOutAI ");
491 if (!irq_ptr->siga_flag.no_sync_out_pci)
492  sprintf(s + strlen(s), "SyncOutPCI");
493 sprintf(s + strlen(s), "\n");
494 printk(KERN_INFO "qdio: %s", s);
495}
496
497int __init qdio_setup_init(void)
498{
 /*
  * Module init for the qdio setup code: create the slab cache for
  * struct qdio_q (256-byte aligned) and record the thin-interrupt
  * and QEBSM capability bits in the debug feature.
  * Returns 0 on success, -ENOMEM if the cache cannot be created.
  */
499 char dbf_text[15];
500
501 qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
502      256, 0, NULL);
503 if (!qdio_q_cache)
504  return -ENOMEM;
505
506 /* Check for OSA/FCP thin interrupts (bit 67). */
507 sprintf(dbf_text, "thini%1x",
508  (css_general_characteristics.aif_osa) ? 1 : 0);
509 QDIO_DBF_TEXT0(0, setup, dbf_text);
510
511 /* Check for QEBSM support in general (bit 58). */
512 sprintf(dbf_text, "cssQBS:%1x",
513  (qebsm_possible()) ? 1 : 0);
514 QDIO_DBF_TEXT0(0, setup, dbf_text);
515 return 0;
516}
517
518void __exit qdio_setup_exit(void)
519{
 /* module exit counterpart of qdio_setup_init(): drop the q cache */
520 kmem_cache_destroy(qdio_q_cache);
521}
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
new file mode 100644
index 000000000000..9291a771d812
--- /dev/null
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -0,0 +1,380 @@
1/*
2 * linux/drivers/s390/cio/thinint_qdio.c
3 *
4 * thin interrupt support for qdio
5 *
6 * Copyright 2000-2008 IBM Corp.
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Cornelia Huck <cornelia.huck@de.ibm.com>
9 * Jan Glauber <jang@linux.vnet.ibm.com>
10 */
11#include <linux/io.h>
12#include <asm/atomic.h>
13#include <asm/debug.h>
14#include <asm/qdio.h>
15#include <asm/airq.h>
16#include <asm/isc.h>
17
18#include "cio.h"
19#include "ioasm.h"
20#include "qdio.h"
21#include "qdio_debug.h"
22#include "qdio_perf.h"
23
24/*
25 * Restriction: only 63 iqdio subchannels have their own indicator;
26 * after that, subsequent subchannels share one indicator
27 */
28#define TIQDIO_NR_NONSHARED_IND 63
29#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
30#define TIQDIO_SHARED_IND 63
31
32/* list of thin interrupt input queues, protected by RCU */
33static LIST_HEAD(tiq_list);
34
35/* adapter local summary indicator */
36static unsigned char *tiqdio_alsi;
37
38/* device state change indicators */
39struct indicator_t {
40 u32 ind;  /* u32 because of compare-and-swap performance */
41 atomic_t count; /* use count, 0 or 1 for non-shared indicators */
42};
43static struct indicator_t *q_indicators;
44
45static void tiqdio_tasklet_fn(unsigned long data);
46static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
47
/* nonzero once SVS can be skipped (TDD installed or SIGA-sync in use) */
48static int css_qdio_omit_svs;
50static inline unsigned long do_clear_global_summary(void)
51{
 /*
  * Issue the rre-format instruction 0xb265 with function code 3 in
  * r1 to clear the global summary indicator (SVS).  Returns the
  * value the instruction leaves in r3.
  * NOTE(review): register constraints and instruction semantics are
  * s390-firmware-defined; do not reorder or restyle this asm.
  */
52 register unsigned long __fn asm("1") = 3;
53 register unsigned long __tmp asm("2");
54 register unsigned long __time asm("3");
55
56 asm volatile(
57  " .insn rre,0xb2650000,2,0"
58  : "+d" (__fn), "=d" (__tmp), "=d" (__time));
59 return __time;
60}
61
62/* returns addr for the device state change indicator */
63static u32 *get_indicator(void)
64{
 /*
  * Hand out the first free non-shared indicator (claiming it by
  * setting its use count to 1); once all 63 are taken, fall back to
  * the shared indicator and bump its reference count.
  * NOTE(review): the read-then-set on count is not atomic as a pair;
  * presumably callers are serialized — confirm against call sites.
  */
65 int i;
66
67 for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
68  if (!atomic_read(&q_indicators[i].count)) {
69   atomic_set(&q_indicators[i].count, 1);
70   return &q_indicators[i].ind;
71  }
72
73 /* use the shared indicator */
74 atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
75 return &q_indicators[TIQDIO_SHARED_IND].ind;
76}
77
78static void put_indicator(u32 *addr)
79{
 /*
  * Release an indicator obtained from get_indicator(): recover the
  * array index from the indicator's address and drop its use count.
  * A NULL address is ignored.
  */
80 int i;
81
82 if (!addr)
83  return;
84 i = ((unsigned long)addr - (unsigned long)q_indicators) /
85  sizeof(struct indicator_t);
86 atomic_dec(&q_indicators[i].count);
87}
88
89void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
90{
 /*
  * Publish all input queues of this irq on the global thinint queue
  * list (RCU-protected), set the device state change indicator and
  * kick the tasklet so any already-pending work is picked up.
  */
91 struct qdio_q *q;
92 int i;
93
94 /* No TDD facility? If we must use SIGA-s we can also omit SVS. */
95 if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
96  css_qdio_omit_svs = 1;
97
98 for_each_input_queue(irq_ptr, q, i) {
99  list_add_rcu(&q->entry, &tiq_list);
  /* wait for concurrent list readers before the next insertion */
100  synchronize_rcu();
101 }
102 xchg(irq_ptr->dsci, 1);
103 tasklet_schedule(&tiqdio_tasklet);
104}
105
106/*
107 * we cannot stop the tiqdio tasklet here since it is for all
108 * thinint qdio devices and it must run as long as there is a
109 * thinint device left
110 */
111void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
112{
 /* unpublish the irq's input queues from the global thinint list,
    waiting out RCU readers after each removal */
113 struct qdio_q *q;
114 int i;
115
116 for_each_input_queue(irq_ptr, q, i) {
117  list_del_rcu(&q->entry);
118  synchronize_rcu();
119 }
120}
121
122static inline int tiqdio_inbound_q_done(struct qdio_q *q)
123{
 /*
  * Returns 1 if the inbound queue has no more work: either no
  * buffers are in use, or (after a SIGA-sync to refresh the state)
  * the next buffer to check is not in the "primed" state.
  */
124 unsigned char state;
125
126 if (!atomic_read(&q->nr_buf_used))
127  return 1;
128
129 qdio_siga_sync_q(q);
130 get_buf_state(q, q->first_to_check, &state);
131
132 if (state == SLSB_P_INPUT_PRIMED)
133  /* more work coming */
134  return 0;
135 return 1;
136}
137
138static inline int shared_ind(struct qdio_irq *irq_ptr)
139{
 /* true if this irq uses the shared (last) state change indicator */
140 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
141}
142
143static void __tiqdio_inbound_processing(struct qdio_q *q)
144{
 /*
  * Core inbound work loop for one thinint queue: sync state, poke
  * the PCI-capable outbound queues, then repeatedly hand newly
  * arrived buffers to the upper-layer handler until the queue is
  * drained.  A second done-check after qdio_stop_polling() avoids
  * losing initiative for buffers that arrived while the ACK state
  * was being reset.
  */
145 qdio_perf_stat_inc(&perf_stats.thinint_inbound);
146 qdio_sync_after_thinint(q);
147
148 /*
149  * Maybe we have work on our outbound queues... at least
150  * we have to check the PCI capable queues.
151  */
152 qdio_check_outbound_after_thinint(q);
153
154again:
155 if (!qdio_inbound_q_moved(q))
156  return;
157
158 qdio_kick_inbound_handler(q);
159
160 if (!tiqdio_inbound_q_done(q)) {
161  qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
162  goto again;
163 }
164
165 qdio_stop_polling(q);
166 /*
167  * We need to check again to not lose initiative after
168  * resetting the ACK state.
169  */
170 if (!tiqdio_inbound_q_done(q)) {
171  qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
172  goto again;
173 }
174}
175
176void tiqdio_inbound_processing(unsigned long data)
177{
 /* tasklet entry point: data is the queue pointer (tasklet ABI) */
178 struct qdio_q *q = (struct qdio_q *)data;
179
180 __tiqdio_inbound_processing(q);
181}
182
183/* check for work on all inbound thinint queues */
184static void tiqdio_tasklet_fn(unsigned long data)
185{
 /*
  * Global thinint tasklet: walk the RCU-protected tiq_list and
  * schedule the per-queue tasklet for every queue whose device
  * state change indicator (dsci) is set.  Shared-indicator handling
  * and the final alsi re-check close the races between the adapter
  * setting indicators and us clearing them.
  */
186 struct qdio_q *q;
187
188 qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
189again:
190
191 /* protect tiq_list entries, only changed in activate or shutdown */
192 rcu_read_lock();
193
194 list_for_each_entry_rcu(q, &tiq_list, entry)
195  /* only process queues from changed sets */
196  if (*q->irq_ptr->dsci) {
197
198   /* only clear it if the indicator is non-shared */
199   if (!shared_ind(q->irq_ptr))
200    xchg(q->irq_ptr->dsci, 0);
201   /*
202    * don't call inbound processing directly since
203    * that could starve other thinint queues
204    */
205   tasklet_schedule(&q->tasklet);
206  }
207
208 rcu_read_unlock();
209
210 /*
211  * if we used the shared indicator clear it now after all queues
212  * were processed
213  */
214 if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
215  xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
216
217  /* prevent racing */
  /* if the summary fired again, re-arm the shared indicator */
218  if (*tiqdio_alsi)
219   xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
220 }
221
222 /* check for more work */
223 if (*tiqdio_alsi) {
224  xchg(tiqdio_alsi, 0);
225  qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
226  goto again;
227 }
228}
229
230/**
231 * tiqdio_thinint_handler - thin interrupt handler for qdio
232 * @ind: pointer to adapter local summary indicator
233 * @drv_data: NULL
234 *
235 * Runs in adapter-interrupt context: clears the local summary
236 * indicator and defers all queue scanning to the tiqdio tasklet.
237 */
235static void tiqdio_thinint_handler(void *ind, void *drv_data)
236{
237 qdio_perf_stat_inc(&perf_stats.thin_int);
238
239 /*
240  * SVS only when needed: issue SVS to benefit from iqdio interrupt
241  * avoidance (SVS clears adapter interrupt suppression overwrite)
242  */
243 if (!css_qdio_omit_svs)
244  do_clear_global_summary();
245
246 /*
247  * reset local summary indicator (tiqdio_alsi) to stop adapter
248  * interrupts for now, the tasklet will clean all dsci's
249  */
250 xchg((u8 *)ind, 0);
251 tasklet_hi_schedule(&tiqdio_tasklet);
252}
253
254static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
255{
 /*
  * Issue CHSC "set subchannel indicator" (command 0x0021) to tell
  * the channel subsystem where the summary and subchannel state
  * change indicators live.  With @reset set, both addresses are
  * cleared, detaching the subchannel from adapter interrupts.
  * Returns 0 on success, -EIO on chsc failure, or the mapped
  * response error code.
  */
256 struct scssc_area *scssc_area;
257 char dbf_text[15];
258 void *ptr;
259 int rc;
260
261 scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
262 memset(scssc_area, 0, PAGE_SIZE);
263
264 if (reset) {
265  scssc_area->summary_indicator_addr = 0;
266  scssc_area->subchannel_indicator_addr = 0;
267 } else {
268  scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
269  scssc_area->subchannel_indicator_addr =
270   virt_to_phys(irq_ptr->dsci);
271 }
272
273 scssc_area->request = (struct chsc_header) {
274  .length = 0x0fe0,
275  .code = 0x0021,
276 };
277 scssc_area->operation_code = 0;
278 scssc_area->ks = PAGE_DEFAULT_KEY;
279 scssc_area->kc = PAGE_DEFAULT_KEY;
280 scssc_area->isc = QDIO_AIRQ_ISC;
281 scssc_area->schid = irq_ptr->schid;
282
283 /* enable the time delay disablement facility */
284 if (css_general_characteristics.aif_tdd)
285  scssc_area->word_with_d_bit = 0x10000000;
286
287 rc = chsc(scssc_area);
288 if (rc)
289  return -EIO;
290
291 rc = chsc_error_from_response(scssc_area->response.code);
292 if (rc) {
293  sprintf(dbf_text, "sidR%4x", scssc_area->response.code);
294  QDIO_DBF_TEXT1(0, trace, dbf_text);
295  QDIO_DBF_TEXT1(0, setup, dbf_text);
296  ptr = &scssc_area->response;
297  QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN);
298  return rc;
299 }
300
301 QDIO_DBF_TEXT2(0, setup, "setscind");
302 QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr,
303        sizeof(unsigned long));
304 QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr,
305        sizeof(unsigned long));
306 return 0;
307}
308
309/* allocate non-shared indicators and shared indicator */
310int __init tiqdio_allocate_memory(void)
311{
 /* one zeroed indicator_t per slot (63 non-shared + 1 shared);
    returns 0 or -ENOMEM */
312 q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
313          GFP_KERNEL);
314 if (!q_indicators)
315  return -ENOMEM;
316 return 0;
317}
318
319void tiqdio_free_memory(void)
320{
 /* counterpart of tiqdio_allocate_memory() */
321 kfree(q_indicators);
322}
323
324int __init tiqdio_register_thinints(void)
325{
 /*
  * Register the qdio adapter interrupt: claim the QDIO ISC and hook
  * tiqdio_thinint_handler, storing the returned local summary
  * indicator in tiqdio_alsi.  On failure the ISC is released again
  * and -ENOMEM is returned (the PTR_ERR value is only logged).
  */
326 char dbf_text[20];
327
328 isc_register(QDIO_AIRQ_ISC);
329 tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
330            NULL, QDIO_AIRQ_ISC);
331 if (IS_ERR(tiqdio_alsi)) {
332  sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi));
333  QDIO_DBF_TEXT0(0, setup, dbf_text);
334  tiqdio_alsi = NULL;
335  isc_unregister(QDIO_AIRQ_ISC);
336  return -ENOMEM;
337 }
338 return 0;
339}
340
341int qdio_establish_thinint(struct qdio_irq *irq_ptr)
342{
 /*
  * At establish time, point the channel subsystem at this irq's
  * indicators (no-op for non-thinint irqs).  Returns 0 or the error
  * from set_subchannel_ind().
  */
343 if (!is_thinint_irq(irq_ptr))
344  return 0;
345
346 /* Check for aif time delay disablement. If installed,
347  * omit SVS even under LPAR
348  */
349 if (css_general_characteristics.aif_tdd)
350  css_qdio_omit_svs = 1;
351 return set_subchannel_ind(irq_ptr, 0);
352}
353
354void qdio_setup_thinint(struct qdio_irq *irq_ptr)
355{
 /* assign a device state change indicator to a thinint irq */
356 if (!is_thinint_irq(irq_ptr))
357  return;
358 irq_ptr->dsci = get_indicator();
359 QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *));
360}
361
362void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
363{
 /* release the indicator and detach the subchannel from adapter
    interrupts (reset mode of set_subchannel_ind) */
364 if (!is_thinint_irq(irq_ptr))
365  return;
366
367 /* reset adapter interrupt indicators */
368 put_indicator(irq_ptr->dsci);
369 set_subchannel_ind(irq_ptr, 1);
370}
371
372void __exit tiqdio_unregister_thinints(void)
373{
 /* module exit: stop the global tasklet, then unhook the adapter
    interrupt and release the ISC if registration had succeeded */
374 tasklet_disable(&tiqdio_tasklet);
375
376 if (tiqdio_alsi) {
377  s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
378  isc_unregister(QDIO_AIRQ_ISC);
379 }
380}
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 699ac11debd8..1895dbb553cd 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -239,11 +239,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
239/*not used unless the microcode gets patched*/ 239/*not used unless the microcode gets patched*/
240#define QETH_PCI_TIMER_VALUE(card) 3 240#define QETH_PCI_TIMER_VALUE(card) 3
241 241
242#define QETH_MIN_INPUT_THRESHOLD 1
243#define QETH_MAX_INPUT_THRESHOLD 500
244#define QETH_MIN_OUTPUT_THRESHOLD 1
245#define QETH_MAX_OUTPUT_THRESHOLD 300
246
247/* priority queing */ 242/* priority queing */
248#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING 243#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
249#define QETH_DEFAULT_QUEUE 2 244#define QETH_DEFAULT_QUEUE 2
@@ -811,17 +806,14 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
811struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, 806struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
812 enum qeth_ipa_cmds, enum qeth_prot_versions); 807 enum qeth_ipa_cmds, enum qeth_prot_versions);
813int qeth_query_setadapterparms(struct qeth_card *); 808int qeth_query_setadapterparms(struct qeth_card *);
814int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, 809int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *);
815 unsigned int, const char *);
816void qeth_queue_input_buffer(struct qeth_card *, int); 810void qeth_queue_input_buffer(struct qeth_card *, int);
817struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, 811struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
818 struct qdio_buffer *, struct qdio_buffer_element **, int *, 812 struct qdio_buffer *, struct qdio_buffer_element **, int *,
819 struct qeth_hdr **); 813 struct qeth_hdr **);
820void qeth_schedule_recovery(struct qeth_card *); 814void qeth_schedule_recovery(struct qeth_card *);
821void qeth_qdio_output_handler(struct ccw_device *, unsigned int, 815void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
822 unsigned int, unsigned int, 816 int, int, int, unsigned long);
823 unsigned int, int, int,
824 unsigned long);
825void qeth_clear_ipacmd_list(struct qeth_card *); 817void qeth_clear_ipacmd_list(struct qeth_card *);
826int qeth_qdio_clear_card(struct qeth_card *, int); 818int qeth_qdio_clear_card(struct qeth_card *, int);
827void qeth_clear_working_pool_list(struct qeth_card *); 819void qeth_clear_working_pool_list(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 0ac54dc638c2..c3ad89e302bd 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2073,7 +2073,7 @@ static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2073static int qeth_qdio_activate(struct qeth_card *card) 2073static int qeth_qdio_activate(struct qeth_card *card)
2074{ 2074{
2075 QETH_DBF_TEXT(SETUP, 3, "qdioact"); 2075 QETH_DBF_TEXT(SETUP, 3, "qdioact");
2076 return qdio_activate(CARD_DDEV(card), 0); 2076 return qdio_activate(CARD_DDEV(card));
2077} 2077}
2078 2078
2079static int qeth_dm_act(struct qeth_card *card) 2079static int qeth_dm_act(struct qeth_card *card)
@@ -2349,16 +2349,11 @@ int qeth_init_qdio_queues(struct qeth_card *card)
2349 card->qdio.in_q->next_buf_to_init = 2349 card->qdio.in_q->next_buf_to_init =
2350 card->qdio.in_buf_pool.buf_count - 1; 2350 card->qdio.in_buf_pool.buf_count - 1;
2351 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, 2351 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2352 card->qdio.in_buf_pool.buf_count - 1, NULL); 2352 card->qdio.in_buf_pool.buf_count - 1);
2353 if (rc) { 2353 if (rc) {
2354 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 2354 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2355 return rc; 2355 return rc;
2356 } 2356 }
2357 rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
2358 if (rc) {
2359 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
2360 return rc;
2361 }
2362 /* outbound queue */ 2357 /* outbound queue */
2363 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2358 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2364 memset(card->qdio.out_qs[i]->qdio_bufs, 0, 2359 memset(card->qdio.out_qs[i]->qdio_bufs, 0,
@@ -2559,9 +2554,9 @@ int qeth_query_setadapterparms(struct qeth_card *card)
2559EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); 2554EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
2560 2555
2561int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, 2556int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2562 unsigned int siga_error, const char *dbftext) 2557 const char *dbftext)
2563{ 2558{
2564 if (qdio_error || siga_error) { 2559 if (qdio_error) {
2565 QETH_DBF_TEXT(TRACE, 2, dbftext); 2560 QETH_DBF_TEXT(TRACE, 2, dbftext);
2566 QETH_DBF_TEXT(QERR, 2, dbftext); 2561 QETH_DBF_TEXT(QERR, 2, dbftext);
2567 QETH_DBF_TEXT_(QERR, 2, " F15=%02X", 2562 QETH_DBF_TEXT_(QERR, 2, " F15=%02X",
@@ -2569,7 +2564,6 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2569 QETH_DBF_TEXT_(QERR, 2, " F14=%02X", 2564 QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
2570 buf->element[14].flags & 0xff); 2565 buf->element[14].flags & 0xff);
2571 QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error); 2566 QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
2572 QETH_DBF_TEXT_(QERR, 2, " serr=%X", siga_error);
2573 return 1; 2567 return 1;
2574 } 2568 }
2575 return 0; 2569 return 0;
@@ -2622,9 +2616,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
2622 card->perf_stats.inbound_do_qdio_start_time = 2616 card->perf_stats.inbound_do_qdio_start_time =
2623 qeth_get_micros(); 2617 qeth_get_micros();
2624 } 2618 }
2625 rc = do_QDIO(CARD_DDEV(card), 2619 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
2626 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, 2620 queue->next_buf_to_init, count);
2627 0, queue->next_buf_to_init, count, NULL);
2628 if (card->options.performance_stats) 2621 if (card->options.performance_stats)
2629 card->perf_stats.inbound_do_qdio_time += 2622 card->perf_stats.inbound_do_qdio_time +=
2630 qeth_get_micros() - 2623 qeth_get_micros() -
@@ -2643,14 +2636,13 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
2643EXPORT_SYMBOL_GPL(qeth_queue_input_buffer); 2636EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
2644 2637
2645static int qeth_handle_send_error(struct qeth_card *card, 2638static int qeth_handle_send_error(struct qeth_card *card,
2646 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err, 2639 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
2647 unsigned int siga_err)
2648{ 2640{
2649 int sbalf15 = buffer->buffer->element[15].flags & 0xff; 2641 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
2650 int cc = siga_err & 3; 2642 int cc = qdio_err & 3;
2651 2643
2652 QETH_DBF_TEXT(TRACE, 6, "hdsnderr"); 2644 QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
2653 qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr"); 2645 qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr");
2654 switch (cc) { 2646 switch (cc) {
2655 case 0: 2647 case 0:
2656 if (qdio_err) { 2648 if (qdio_err) {
@@ -2662,7 +2654,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
2662 } 2654 }
2663 return QETH_SEND_ERROR_NONE; 2655 return QETH_SEND_ERROR_NONE;
2664 case 2: 2656 case 2:
2665 if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) { 2657 if (qdio_err & QDIO_ERROR_SIGA_BUSY) {
2666 QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B"); 2658 QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B");
2667 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); 2659 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
2668 return QETH_SEND_ERROR_KICK_IT; 2660 return QETH_SEND_ERROR_KICK_IT;
@@ -2758,8 +2750,8 @@ static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2758 return 0; 2750 return 0;
2759} 2751}
2760 2752
2761static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, 2753static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2762 int index, int count) 2754 int count)
2763{ 2755{
2764 struct qeth_qdio_out_buffer *buf; 2756 struct qeth_qdio_out_buffer *buf;
2765 int rc; 2757 int rc;
@@ -2807,12 +2799,10 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2807 qeth_get_micros(); 2799 qeth_get_micros();
2808 } 2800 }
2809 qdio_flags = QDIO_FLAG_SYNC_OUTPUT; 2801 qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
2810 if (under_int)
2811 qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT;
2812 if (atomic_read(&queue->set_pci_flags_count)) 2802 if (atomic_read(&queue->set_pci_flags_count))
2813 qdio_flags |= QDIO_FLAG_PCI_OUT; 2803 qdio_flags |= QDIO_FLAG_PCI_OUT;
2814 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, 2804 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
2815 queue->queue_no, index, count, NULL); 2805 queue->queue_no, index, count);
2816 if (queue->card->options.performance_stats) 2806 if (queue->card->options.performance_stats)
2817 queue->card->perf_stats.outbound_do_qdio_time += 2807 queue->card->perf_stats.outbound_do_qdio_time +=
2818 qeth_get_micros() - 2808 qeth_get_micros() -
@@ -2866,16 +2856,15 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2866 queue->card->perf_stats.bufs_sent_pack += 2856 queue->card->perf_stats.bufs_sent_pack +=
2867 flush_cnt; 2857 flush_cnt;
2868 if (flush_cnt) 2858 if (flush_cnt)
2869 qeth_flush_buffers(queue, 1, index, flush_cnt); 2859 qeth_flush_buffers(queue, index, flush_cnt);
2870 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 2860 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
2871 } 2861 }
2872 } 2862 }
2873} 2863}
2874 2864
2875void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status, 2865void qeth_qdio_output_handler(struct ccw_device *ccwdev,
2876 unsigned int qdio_error, unsigned int siga_error, 2866 unsigned int qdio_error, int __queue, int first_element,
2877 unsigned int __queue, int first_element, int count, 2867 int count, unsigned long card_ptr)
2878 unsigned long card_ptr)
2879{ 2868{
2880 struct qeth_card *card = (struct qeth_card *) card_ptr; 2869 struct qeth_card *card = (struct qeth_card *) card_ptr;
2881 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; 2870 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
@@ -2883,15 +2872,12 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
2883 int i; 2872 int i;
2884 2873
2885 QETH_DBF_TEXT(TRACE, 6, "qdouhdl"); 2874 QETH_DBF_TEXT(TRACE, 6, "qdouhdl");
2886 if (status & QDIO_STATUS_LOOK_FOR_ERROR) { 2875 if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
2887 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { 2876 QETH_DBF_TEXT(TRACE, 2, "achkcond");
2888 QETH_DBF_TEXT(TRACE, 2, "achkcond"); 2877 QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
2889 QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); 2878 netif_stop_queue(card->dev);
2890 QETH_DBF_TEXT_(TRACE, 2, "%08x", status); 2879 qeth_schedule_recovery(card);
2891 netif_stop_queue(card->dev); 2880 return;
2892 qeth_schedule_recovery(card);
2893 return;
2894 }
2895 } 2881 }
2896 if (card->options.performance_stats) { 2882 if (card->options.performance_stats) {
2897 card->perf_stats.outbound_handler_cnt++; 2883 card->perf_stats.outbound_handler_cnt++;
@@ -2901,8 +2887,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
2901 for (i = first_element; i < (first_element + count); ++i) { 2887 for (i = first_element; i < (first_element + count); ++i) {
2902 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; 2888 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2903 /*we only handle the KICK_IT error by doing a recovery */ 2889 /*we only handle the KICK_IT error by doing a recovery */
2904 if (qeth_handle_send_error(card, buffer, 2890 if (qeth_handle_send_error(card, buffer, qdio_error)
2905 qdio_error, siga_error)
2906 == QETH_SEND_ERROR_KICK_IT){ 2891 == QETH_SEND_ERROR_KICK_IT){
2907 netif_stop_queue(card->dev); 2892 netif_stop_queue(card->dev);
2908 qeth_schedule_recovery(card); 2893 qeth_schedule_recovery(card);
@@ -3164,11 +3149,11 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
3164 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3149 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3165 if (ctx == NULL) { 3150 if (ctx == NULL) {
3166 qeth_fill_buffer(queue, buffer, skb); 3151 qeth_fill_buffer(queue, buffer, skb);
3167 qeth_flush_buffers(queue, 0, index, 1); 3152 qeth_flush_buffers(queue, index, 1);
3168 } else { 3153 } else {
3169 flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index); 3154 flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
3170 WARN_ON(buffers_needed != flush_cnt); 3155 WARN_ON(buffers_needed != flush_cnt);
3171 qeth_flush_buffers(queue, 0, index, flush_cnt); 3156 qeth_flush_buffers(queue, index, flush_cnt);
3172 } 3157 }
3173 return 0; 3158 return 0;
3174out: 3159out:
@@ -3221,8 +3206,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3221 * again */ 3206 * again */
3222 if (atomic_read(&buffer->state) != 3207 if (atomic_read(&buffer->state) !=
3223 QETH_QDIO_BUF_EMPTY){ 3208 QETH_QDIO_BUF_EMPTY){
3224 qeth_flush_buffers(queue, 0, 3209 qeth_flush_buffers(queue, start_index,
3225 start_index, flush_count); 3210 flush_count);
3226 atomic_set(&queue->state, 3211 atomic_set(&queue->state,
3227 QETH_OUT_Q_UNLOCKED); 3212 QETH_OUT_Q_UNLOCKED);
3228 return -EBUSY; 3213 return -EBUSY;
@@ -3253,7 +3238,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3253 flush_count += tmp; 3238 flush_count += tmp;
3254out: 3239out:
3255 if (flush_count) 3240 if (flush_count)
3256 qeth_flush_buffers(queue, 0, start_index, flush_count); 3241 qeth_flush_buffers(queue, start_index, flush_count);
3257 else if (!atomic_read(&queue->set_pci_flags_count)) 3242 else if (!atomic_read(&queue->set_pci_flags_count))
3258 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); 3243 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
3259 /* 3244 /*
@@ -3274,7 +3259,7 @@ out:
3274 if (!flush_count && !atomic_read(&queue->set_pci_flags_count)) 3259 if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
3275 flush_count += qeth_flush_buffers_on_no_pci(queue); 3260 flush_count += qeth_flush_buffers_on_no_pci(queue);
3276 if (flush_count) 3261 if (flush_count)
3277 qeth_flush_buffers(queue, 0, start_index, flush_count); 3262 qeth_flush_buffers(queue, start_index, flush_count);
3278 } 3263 }
3279 /* at this point the queue is UNLOCKED again */ 3264 /* at this point the queue is UNLOCKED again */
3280 if (queue->card->options.performance_stats && do_pack) 3265 if (queue->card->options.performance_stats && do_pack)
@@ -3686,10 +3671,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
3686 init_data.q_format = qeth_get_qdio_q_format(card); 3671 init_data.q_format = qeth_get_qdio_q_format(card);
3687 init_data.qib_param_field_format = 0; 3672 init_data.qib_param_field_format = 0;
3688 init_data.qib_param_field = qib_param_field; 3673 init_data.qib_param_field = qib_param_field;
3689 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3690 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3691 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3692 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3693 init_data.no_input_qs = 1; 3674 init_data.no_input_qs = 1;
3694 init_data.no_output_qs = card->qdio.no_out_queues; 3675 init_data.no_output_qs = card->qdio.no_out_queues;
3695 init_data.input_handler = card->discipline.input_handler; 3676 init_data.input_handler = card->discipline.input_handler;
@@ -3751,8 +3732,9 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3751 3732
3752int qeth_core_hardsetup_card(struct qeth_card *card) 3733int qeth_core_hardsetup_card(struct qeth_card *card)
3753{ 3734{
3735 struct qdio_ssqd_desc *qdio_ssqd;
3754 int retries = 3; 3736 int retries = 3;
3755 int mpno; 3737 int mpno = 0;
3756 int rc; 3738 int rc;
3757 3739
3758 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 3740 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
@@ -3784,7 +3766,10 @@ retry:
3784 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3766 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3785 return rc; 3767 return rc;
3786 } 3768 }
3787 mpno = qdio_get_ssqd_pct(CARD_DDEV(card)); 3769
3770 qdio_ssqd = qdio_get_ssqd_desc(CARD_DDEV(card));
3771 if (qdio_ssqd)
3772 mpno = qdio_ssqd->pcnt;
3788 if (mpno) 3773 if (mpno)
3789 mpno = min(mpno - 1, QETH_MAX_PORTNO); 3774 mpno = min(mpno - 1, QETH_MAX_PORTNO);
3790 if (card->info.portno > mpno) { 3775 if (card->info.portno > mpno) {
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index f682f7b14480..3fbc3bdec0c5 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -726,8 +726,7 @@ tx_drop:
726} 726}
727 727
728static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, 728static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
729 unsigned int status, unsigned int qdio_err, 729 unsigned int qdio_err, unsigned int queue,
730 unsigned int siga_err, unsigned int queue,
731 int first_element, int count, unsigned long card_ptr) 730 int first_element, int count, unsigned long card_ptr)
732{ 731{
733 struct net_device *net_dev; 732 struct net_device *net_dev;
@@ -742,23 +741,20 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
742 card->perf_stats.inbound_cnt++; 741 card->perf_stats.inbound_cnt++;
743 card->perf_stats.inbound_start_time = qeth_get_micros(); 742 card->perf_stats.inbound_start_time = qeth_get_micros();
744 } 743 }
745 if (status & QDIO_STATUS_LOOK_FOR_ERROR) { 744 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
746 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { 745 QETH_DBF_TEXT(TRACE, 1, "qdinchk");
747 QETH_DBF_TEXT(TRACE, 1, "qdinchk"); 746 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
748 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); 747 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
749 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element, 748 count);
750 count); 749 QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
751 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status); 750 qeth_schedule_recovery(card);
752 qeth_schedule_recovery(card); 751 return;
753 return;
754 }
755 } 752 }
756 for (i = first_element; i < (first_element + count); ++i) { 753 for (i = first_element; i < (first_element + count); ++i) {
757 index = i % QDIO_MAX_BUFFERS_PER_Q; 754 index = i % QDIO_MAX_BUFFERS_PER_Q;
758 buffer = &card->qdio.in_q->bufs[index]; 755 buffer = &card->qdio.in_q->bufs[index];
759 if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) && 756 if (!(qdio_err &&
760 qeth_check_qdio_errors(buffer->buffer, 757 qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr")))
761 qdio_err, siga_err, "qinerr")))
762 qeth_l2_process_inbound_buffer(card, buffer, index); 758 qeth_l2_process_inbound_buffer(card, buffer, index);
763 /* clear buffer and give back to hardware */ 759 /* clear buffer and give back to hardware */
764 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 760 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 06deaee50f6d..22f64aa6dd1f 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2939,8 +2939,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
2939} 2939}
2940 2940
2941static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, 2941static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
2942 unsigned int status, unsigned int qdio_err, 2942 unsigned int qdio_err, unsigned int queue, int first_element,
2943 unsigned int siga_err, unsigned int queue, int first_element,
2944 int count, unsigned long card_ptr) 2943 int count, unsigned long card_ptr)
2945{ 2944{
2946 struct net_device *net_dev; 2945 struct net_device *net_dev;
@@ -2955,23 +2954,21 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
2955 card->perf_stats.inbound_cnt++; 2954 card->perf_stats.inbound_cnt++;
2956 card->perf_stats.inbound_start_time = qeth_get_micros(); 2955 card->perf_stats.inbound_start_time = qeth_get_micros();
2957 } 2956 }
2958 if (status & QDIO_STATUS_LOOK_FOR_ERROR) { 2957 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
2959 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { 2958 QETH_DBF_TEXT(TRACE, 1, "qdinchk");
2960 QETH_DBF_TEXT(TRACE, 1, "qdinchk"); 2959 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
2961 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); 2960 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
2962 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", 2961 first_element, count);
2963 first_element, count); 2962 QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
2964 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status); 2963 qeth_schedule_recovery(card);
2965 qeth_schedule_recovery(card); 2964 return;
2966 return;
2967 }
2968 } 2965 }
2969 for (i = first_element; i < (first_element + count); ++i) { 2966 for (i = first_element; i < (first_element + count); ++i) {
2970 index = i % QDIO_MAX_BUFFERS_PER_Q; 2967 index = i % QDIO_MAX_BUFFERS_PER_Q;
2971 buffer = &card->qdio.in_q->bufs[index]; 2968 buffer = &card->qdio.in_q->bufs[index];
2972 if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) && 2969 if (!(qdio_err &&
2973 qeth_check_qdio_errors(buffer->buffer, 2970 qeth_check_qdio_errors(buffer->buffer,
2974 qdio_err, siga_err, "qinerr"))) 2971 qdio_err, "qinerr")))
2975 qeth_l3_process_inbound_buffer(card, buffer, index); 2972 qeth_l3_process_inbound_buffer(card, buffer, index);
2976 /* clear buffer and give back to hardware */ 2973 /* clear buffer and give back to hardware */
2977 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 2974 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index d6a78f1a2f16..cb301cc6178c 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -3,7 +3,6 @@
3# 3#
4 4
5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \ 5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
6 zfcp_fsf.o zfcp_dbf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \ 6 zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o
7 zfcp_sysfs_unit.o zfcp_sysfs_driver.o
8 7
9obj-$(CONFIG_ZFCP) += zfcp.o 8obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 8c7e2b778ef1..90abfd06ed55 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -1,22 +1,9 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * Module interface and handling of zfcp data structures.
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22/* 9/*
@@ -31,93 +18,25 @@
31 * Maxim Shchetynin 18 * Maxim Shchetynin
32 * Volker Sameske 19 * Volker Sameske
33 * Ralph Wuerthner 20 * Ralph Wuerthner
21 * Michael Loehr
22 * Swen Schillig
23 * Christof Schmitt
24 * Martin Petermann
25 * Sven Schuetz
34 */ 26 */
35 27
28#include <linux/miscdevice.h>
36#include "zfcp_ext.h" 29#include "zfcp_ext.h"
37 30
38/* accumulated log level (module parameter) */
39static u32 loglevel = ZFCP_LOG_LEVEL_DEFAULTS;
40static char *device; 31static char *device;
41/*********************** FUNCTION PROTOTYPES *********************************/
42
43/* written against the module interface */
44static int __init zfcp_module_init(void);
45
46/* FCP related */
47static void zfcp_ns_gid_pn_handler(unsigned long);
48
49/* miscellaneous */
50static int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t);
51static void zfcp_sg_list_free(struct zfcp_sg_list *);
52static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *,
53 void __user *, size_t);
54static int zfcp_sg_list_copy_to_user(void __user *,
55 struct zfcp_sg_list *, size_t);
56static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long);
57
58#define ZFCP_CFDC_IOC_MAGIC 0xDD
59#define ZFCP_CFDC_IOC \
60 _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_sense_data)
61
62
63static const struct file_operations zfcp_cfdc_fops = {
64 .unlocked_ioctl = zfcp_cfdc_dev_ioctl,
65#ifdef CONFIG_COMPAT
66 .compat_ioctl = zfcp_cfdc_dev_ioctl
67#endif
68};
69
70static struct miscdevice zfcp_cfdc_misc = {
71 .minor = ZFCP_CFDC_DEV_MINOR,
72 .name = ZFCP_CFDC_DEV_NAME,
73 .fops = &zfcp_cfdc_fops
74};
75
76/*********************** KERNEL/MODULE PARAMETERS ***************************/
77
78/* declare driver module init/cleanup functions */
79module_init(zfcp_module_init);
80 32
81MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com"); 33MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com");
82MODULE_DESCRIPTION 34MODULE_DESCRIPTION("FCP HBA driver");
83 ("FCP (SCSI over Fibre Channel) HBA driver for IBM System z9 and zSeries");
84MODULE_LICENSE("GPL"); 35MODULE_LICENSE("GPL");
85 36
86module_param(device, charp, 0400); 37module_param(device, charp, 0400);
87MODULE_PARM_DESC(device, "specify initial device"); 38MODULE_PARM_DESC(device, "specify initial device");
88 39
89module_param(loglevel, uint, 0400);
90MODULE_PARM_DESC(loglevel,
91 "log levels, 8 nibbles: "
92 "FC ERP QDIO CIO Config FSF SCSI Other, "
93 "levels: 0=none 1=normal 2=devel 3=trace");
94
95/****************************************************************/
96/************** Functions without logging ***********************/
97/****************************************************************/
98
99void
100_zfcp_hex_dump(char *addr, int count)
101{
102 int i;
103 for (i = 0; i < count; i++) {
104 printk("%02x", addr[i]);
105 if ((i % 4) == 3)
106 printk(" ");
107 if ((i % 32) == 31)
108 printk("\n");
109 }
110 if (((i-1) % 32) != 31)
111 printk("\n");
112}
113
114
115/****************************************************************/
116/****** Functions to handle the request ID hash table ********/
117/****************************************************************/
118
119#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
120
121static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter) 40static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
122{ 41{
123 int idx; 42 int idx;
@@ -132,11 +51,12 @@ static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
132 return 0; 51 return 0;
133} 52}
134 53
135static void zfcp_reqlist_free(struct zfcp_adapter *adapter) 54/**
136{ 55 * zfcp_reqlist_isempty - is the request list empty
137 kfree(adapter->req_list); 56 * @adapter: pointer to struct zfcp_adapter
138} 57 *
139 58 * Returns: true if list is empty, false otherwise
59 */
140int zfcp_reqlist_isempty(struct zfcp_adapter *adapter) 60int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
141{ 61{
142 unsigned int idx; 62 unsigned int idx;
@@ -147,62 +67,58 @@ int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
147 return 1; 67 return 1;
148} 68}
149 69
150#undef ZFCP_LOG_AREA 70static int __init zfcp_device_setup(char *devstr)
151
152/****************************************************************/
153/************** Uncategorised Functions *************************/
154/****************************************************************/
155
156#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
157
158/**
159 * zfcp_device_setup - setup function
160 * @str: pointer to parameter string
161 *
162 * Parse "device=..." parameter string.
163 */
164static int __init
165zfcp_device_setup(char *devstr)
166{ 71{
167 char *tmp, *str; 72 char *token;
168 size_t len; 73 char *str;
169 74
170 if (!devstr) 75 if (!devstr)
171 return 0; 76 return 0;
172 77
173 len = strlen(devstr) + 1; 78 /* duplicate devstr and keep the original for sysfs presentation*/
174 str = kmalloc(len, GFP_KERNEL); 79 str = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
175 if (!str) 80 if (!str)
176 goto err_out; 81 return 0;
177 memcpy(str, devstr, len);
178 82
179 tmp = strchr(str, ','); 83 strcpy(str, devstr);
180 if (!tmp)
181 goto err_out;
182 *tmp++ = '\0';
183 strncpy(zfcp_data.init_busid, str, BUS_ID_SIZE);
184 zfcp_data.init_busid[BUS_ID_SIZE-1] = '\0';
185 84
186 zfcp_data.init_wwpn = simple_strtoull(tmp, &tmp, 0); 85 token = strsep(&str, ",");
187 if (*tmp++ != ',') 86 if (!token || strlen(token) >= BUS_ID_SIZE)
188 goto err_out; 87 goto err_out;
189 if (*tmp == '\0') 88 strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE);
89
90 token = strsep(&str, ",");
91 if (!token || strict_strtoull(token, 0, &zfcp_data.init_wwpn))
190 goto err_out; 92 goto err_out;
191 93
192 zfcp_data.init_fcp_lun = simple_strtoull(tmp, &tmp, 0); 94 token = strsep(&str, ",");
193 if (*tmp != '\0') 95 if (!token || strict_strtoull(token, 0, &zfcp_data.init_fcp_lun))
194 goto err_out; 96 goto err_out;
97
195 kfree(str); 98 kfree(str);
196 return 1; 99 return 1;
197 100
198 err_out: 101 err_out:
199 ZFCP_LOG_NORMAL("Parse error for device parameter string %s\n", str);
200 kfree(str); 102 kfree(str);
103 pr_err("zfcp: Parse error for device parameter string %s, "
104 "device not attached.\n", devstr);
201 return 0; 105 return 0;
202} 106}
203 107
204static void __init 108static struct zfcp_adapter *zfcp_get_adapter_by_busid(char *bus_id)
205zfcp_init_device_configure(void) 109{
110 struct zfcp_adapter *adapter;
111
112 list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list)
113 if ((strncmp(bus_id, adapter->ccw_device->dev.bus_id,
114 BUS_ID_SIZE) == 0) &&
115 !(atomic_read(&adapter->status) &
116 ZFCP_STATUS_COMMON_REMOVE))
117 return adapter;
118 return NULL;
119}
120
121static void __init zfcp_init_device_configure(void)
206{ 122{
207 struct zfcp_adapter *adapter; 123 struct zfcp_adapter *adapter;
208 struct zfcp_port *port; 124 struct zfcp_port *port;
@@ -215,101 +131,75 @@ zfcp_init_device_configure(void)
215 zfcp_adapter_get(adapter); 131 zfcp_adapter_get(adapter);
216 read_unlock_irq(&zfcp_data.config_lock); 132 read_unlock_irq(&zfcp_data.config_lock);
217 133
218 if (adapter == NULL) 134 if (!adapter)
219 goto out_adapter; 135 goto out_adapter;
220 port = zfcp_port_enqueue(adapter, zfcp_data.init_wwpn, 0, 0); 136 port = zfcp_port_enqueue(adapter, zfcp_data.init_wwpn, 0, 0);
221 if (!port) 137 if (IS_ERR(port))
222 goto out_port; 138 goto out_port;
223 unit = zfcp_unit_enqueue(port, zfcp_data.init_fcp_lun); 139 unit = zfcp_unit_enqueue(port, zfcp_data.init_fcp_lun);
224 if (!unit) 140 if (IS_ERR(unit))
225 goto out_unit; 141 goto out_unit;
226 up(&zfcp_data.config_sema); 142 up(&zfcp_data.config_sema);
227 ccw_device_set_online(adapter->ccw_device); 143 ccw_device_set_online(adapter->ccw_device);
228 zfcp_erp_wait(adapter); 144 zfcp_erp_wait(adapter);
229 down(&zfcp_data.config_sema); 145 down(&zfcp_data.config_sema);
230 zfcp_unit_put(unit); 146 zfcp_unit_put(unit);
231 out_unit: 147out_unit:
232 zfcp_port_put(port); 148 zfcp_port_put(port);
233 out_port: 149out_port:
234 zfcp_adapter_put(adapter); 150 zfcp_adapter_put(adapter);
235 out_adapter: 151out_adapter:
236 up(&zfcp_data.config_sema); 152 up(&zfcp_data.config_sema);
237 return; 153 return;
238} 154}
239 155
240static int calc_alignment(int size) 156static struct kmem_cache *zfcp_cache_create(int size, char *name)
241{ 157{
242 int align = 1; 158 int align = 1;
243
244 if (!size)
245 return 0;
246
247 while ((size - align) > 0) 159 while ((size - align) > 0)
248 align <<= 1; 160 align <<= 1;
249 161 return kmem_cache_create(name , size, align, 0, NULL);
250 return align;
251} 162}
252 163
253static int __init 164static int __init zfcp_module_init(void)
254zfcp_module_init(void)
255{ 165{
256 int retval = -ENOMEM; 166 int retval = -ENOMEM;
257 int size, align;
258 167
259 size = sizeof(struct zfcp_fsf_req_qtcb); 168 zfcp_data.fsf_req_qtcb_cache = zfcp_cache_create(
260 align = calc_alignment(size); 169 sizeof(struct zfcp_fsf_req_qtcb), "zfcp_fsf");
261 zfcp_data.fsf_req_qtcb_cache =
262 kmem_cache_create("zfcp_fsf", size, align, 0, NULL);
263 if (!zfcp_data.fsf_req_qtcb_cache) 170 if (!zfcp_data.fsf_req_qtcb_cache)
264 goto out; 171 goto out;
265 172
266 size = sizeof(struct fsf_status_read_buffer); 173 zfcp_data.sr_buffer_cache = zfcp_cache_create(
267 align = calc_alignment(size); 174 sizeof(struct fsf_status_read_buffer), "zfcp_sr");
268 zfcp_data.sr_buffer_cache =
269 kmem_cache_create("zfcp_sr", size, align, 0, NULL);
270 if (!zfcp_data.sr_buffer_cache) 175 if (!zfcp_data.sr_buffer_cache)
271 goto out_sr_cache; 176 goto out_sr_cache;
272 177
273 size = sizeof(struct zfcp_gid_pn_data); 178 zfcp_data.gid_pn_cache = zfcp_cache_create(
274 align = calc_alignment(size); 179 sizeof(struct zfcp_gid_pn_data), "zfcp_gid");
275 zfcp_data.gid_pn_cache =
276 kmem_cache_create("zfcp_gid", size, align, 0, NULL);
277 if (!zfcp_data.gid_pn_cache) 180 if (!zfcp_data.gid_pn_cache)
278 goto out_gid_cache; 181 goto out_gid_cache;
279 182
280 atomic_set(&zfcp_data.loglevel, loglevel);
281
282 /* initialize adapter list */
283 INIT_LIST_HEAD(&zfcp_data.adapter_list_head); 183 INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
284
285 /* initialize adapters to be removed list head */
286 INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh); 184 INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh);
287 185
186 sema_init(&zfcp_data.config_sema, 1);
187 rwlock_init(&zfcp_data.config_lock);
188
288 zfcp_data.scsi_transport_template = 189 zfcp_data.scsi_transport_template =
289 fc_attach_transport(&zfcp_transport_functions); 190 fc_attach_transport(&zfcp_transport_functions);
290 if (!zfcp_data.scsi_transport_template) 191 if (!zfcp_data.scsi_transport_template)
291 goto out_transport; 192 goto out_transport;
292 193
293 retval = misc_register(&zfcp_cfdc_misc); 194 retval = misc_register(&zfcp_cfdc_misc);
294 if (retval != 0) { 195 if (retval) {
295 ZFCP_LOG_INFO("registration of misc device " 196 pr_err("zfcp: registration of misc device zfcp_cfdc failed\n");
296 "zfcp_cfdc failed\n");
297 goto out_misc; 197 goto out_misc;
298 } 198 }
299 199
300 ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n",
301 ZFCP_CFDC_DEV_MAJOR, zfcp_cfdc_misc.minor);
302
303 /* Initialise proc semaphores */
304 sema_init(&zfcp_data.config_sema, 1);
305
306 /* initialise configuration rw lock */
307 rwlock_init(&zfcp_data.config_lock);
308
309 /* setup dynamic I/O */
310 retval = zfcp_ccw_register(); 200 retval = zfcp_ccw_register();
311 if (retval) { 201 if (retval) {
312 ZFCP_LOG_NORMAL("registration with common I/O layer failed\n"); 202 pr_err("zfcp: Registration with common I/O layer failed.\n");
313 goto out_ccw_register; 203 goto out_ccw_register;
314 } 204 }
315 205
@@ -318,527 +208,88 @@ zfcp_module_init(void)
318 208
319 goto out; 209 goto out;
320 210
321 out_ccw_register: 211out_ccw_register:
322 misc_deregister(&zfcp_cfdc_misc); 212 misc_deregister(&zfcp_cfdc_misc);
323 out_misc: 213out_misc:
324 fc_release_transport(zfcp_data.scsi_transport_template); 214 fc_release_transport(zfcp_data.scsi_transport_template);
325 out_transport: 215out_transport:
326 kmem_cache_destroy(zfcp_data.gid_pn_cache); 216 kmem_cache_destroy(zfcp_data.gid_pn_cache);
327 out_gid_cache: 217out_gid_cache:
328 kmem_cache_destroy(zfcp_data.sr_buffer_cache); 218 kmem_cache_destroy(zfcp_data.sr_buffer_cache);
329 out_sr_cache: 219out_sr_cache:
330 kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache); 220 kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache);
331 out: 221out:
332 return retval; 222 return retval;
333} 223}
334 224
335/* 225module_init(zfcp_module_init);
336 * function: zfcp_cfdc_dev_ioctl
337 *
338 * purpose: Handle control file upload/download transaction via IOCTL
339 * interface
340 *
341 * returns: 0 - Operation completed successfuly
342 * -ENOTTY - Unknown IOCTL command
343 * -EINVAL - Invalid sense data record
344 * -ENXIO - The FCP adapter is not available
345 * -EOPNOTSUPP - The FCP adapter does not have CFDC support
346 * -ENOMEM - Insufficient memory
347 * -EFAULT - User space memory I/O operation fault
348 * -EPERM - Cannot create or queue FSF request or create SBALs
349 * -ERESTARTSYS- Received signal (is mapped to EAGAIN by VFS)
350 */
351static long
352zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
353 unsigned long buffer)
354{
355 struct zfcp_cfdc_sense_data *sense_data, __user *sense_data_user;
356 struct zfcp_adapter *adapter = NULL;
357 struct zfcp_fsf_req *fsf_req = NULL;
358 struct zfcp_sg_list *sg_list = NULL;
359 u32 fsf_command, option;
360 char *bus_id = NULL;
361 int retval = 0;
362
363 sense_data = kmalloc(sizeof(struct zfcp_cfdc_sense_data), GFP_KERNEL);
364 if (sense_data == NULL) {
365 retval = -ENOMEM;
366 goto out;
367 }
368
369 sg_list = kzalloc(sizeof(struct zfcp_sg_list), GFP_KERNEL);
370 if (sg_list == NULL) {
371 retval = -ENOMEM;
372 goto out;
373 }
374
375 if (command != ZFCP_CFDC_IOC) {
376 ZFCP_LOG_INFO("IOC request code 0x%x invalid\n", command);
377 retval = -ENOTTY;
378 goto out;
379 }
380
381 if ((sense_data_user = (void __user *) buffer) == NULL) {
382 ZFCP_LOG_INFO("sense data record is required\n");
383 retval = -EINVAL;
384 goto out;
385 }
386
387 retval = copy_from_user(sense_data, sense_data_user,
388 sizeof(struct zfcp_cfdc_sense_data));
389 if (retval) {
390 retval = -EFAULT;
391 goto out;
392 }
393
394 if (sense_data->signature != ZFCP_CFDC_SIGNATURE) {
395 ZFCP_LOG_INFO("invalid sense data request signature 0x%08x\n",
396 ZFCP_CFDC_SIGNATURE);
397 retval = -EINVAL;
398 goto out;
399 }
400
401 switch (sense_data->command) {
402
403 case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
404 fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
405 option = FSF_CFDC_OPTION_NORMAL_MODE;
406 break;
407
408 case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
409 fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
410 option = FSF_CFDC_OPTION_FORCE;
411 break;
412
413 case ZFCP_CFDC_CMND_FULL_ACCESS:
414 fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
415 option = FSF_CFDC_OPTION_FULL_ACCESS;
416 break;
417
418 case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
419 fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
420 option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
421 break;
422
423 case ZFCP_CFDC_CMND_UPLOAD:
424 fsf_command = FSF_QTCB_UPLOAD_CONTROL_FILE;
425 option = 0;
426 break;
427
428 default:
429 ZFCP_LOG_INFO("invalid command code 0x%08x\n",
430 sense_data->command);
431 retval = -EINVAL;
432 goto out;
433 }
434
435 bus_id = kmalloc(BUS_ID_SIZE, GFP_KERNEL);
436 if (bus_id == NULL) {
437 retval = -ENOMEM;
438 goto out;
439 }
440 snprintf(bus_id, BUS_ID_SIZE, "%d.%d.%04x",
441 (sense_data->devno >> 24),
442 (sense_data->devno >> 16) & 0xFF,
443 (sense_data->devno & 0xFFFF));
444
445 read_lock_irq(&zfcp_data.config_lock);
446 adapter = zfcp_get_adapter_by_busid(bus_id);
447 if (adapter)
448 zfcp_adapter_get(adapter);
449 read_unlock_irq(&zfcp_data.config_lock);
450
451 kfree(bus_id);
452
453 if (adapter == NULL) {
454 ZFCP_LOG_INFO("invalid adapter\n");
455 retval = -ENXIO;
456 goto out;
457 }
458
459 if (sense_data->command & ZFCP_CFDC_WITH_CONTROL_FILE) {
460 retval = zfcp_sg_list_alloc(sg_list,
461 ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
462 if (retval) {
463 retval = -ENOMEM;
464 goto out;
465 }
466 }
467
468 if ((sense_data->command & ZFCP_CFDC_DOWNLOAD) &&
469 (sense_data->command & ZFCP_CFDC_WITH_CONTROL_FILE)) {
470 retval = zfcp_sg_list_copy_from_user(
471 sg_list, &sense_data_user->control_file,
472 ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
473 if (retval) {
474 retval = -EFAULT;
475 goto out;
476 }
477 }
478
479 retval = zfcp_fsf_control_file(adapter, &fsf_req, fsf_command,
480 option, sg_list);
481 if (retval)
482 goto out;
483
484 if ((fsf_req->qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
485 (fsf_req->qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
486 retval = -ENXIO;
487 goto out;
488 }
489
490 sense_data->fsf_status = fsf_req->qtcb->header.fsf_status;
491 memcpy(&sense_data->fsf_status_qual,
492 &fsf_req->qtcb->header.fsf_status_qual,
493 sizeof(union fsf_status_qual));
494 memcpy(&sense_data->payloads, &fsf_req->qtcb->bottom.support.els, 256);
495
496 retval = copy_to_user(sense_data_user, sense_data,
497 sizeof(struct zfcp_cfdc_sense_data));
498 if (retval) {
499 retval = -EFAULT;
500 goto out;
501 }
502
503 if (sense_data->command & ZFCP_CFDC_UPLOAD) {
504 retval = zfcp_sg_list_copy_to_user(
505 &sense_data_user->control_file, sg_list,
506 ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
507 if (retval) {
508 retval = -EFAULT;
509 goto out;
510 }
511 }
512
513 out:
514 if (fsf_req != NULL)
515 zfcp_fsf_req_free(fsf_req);
516
517 if ((adapter != NULL) && (retval != -ENXIO))
518 zfcp_adapter_put(adapter);
519
520 if (sg_list != NULL) {
521 zfcp_sg_list_free(sg_list);
522 kfree(sg_list);
523 }
524
525 kfree(sense_data);
526
527 return retval;
528}
529
530
531/**
532 * zfcp_sg_list_alloc - create a scatter-gather list of the specified size
533 * @sg_list: structure describing a scatter gather list
534 * @size: size of scatter-gather list
535 * Return: 0 on success, else -ENOMEM
536 *
537 * In sg_list->sg a pointer to the created scatter-gather list is returned,
538 * or NULL if we run out of memory. sg_list->count specifies the number of
539 * elements of the scatter-gather list. The maximum size of a single element
540 * in the scatter-gather list is PAGE_SIZE.
541 */
542static int
543zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
544{
545 struct scatterlist *sg;
546 unsigned int i;
547 int retval = 0;
548 void *address;
549
550 BUG_ON(sg_list == NULL);
551
552 sg_list->count = size >> PAGE_SHIFT;
553 if (size & ~PAGE_MASK)
554 sg_list->count++;
555 sg_list->sg = kcalloc(sg_list->count, sizeof(struct scatterlist),
556 GFP_KERNEL);
557 if (sg_list->sg == NULL) {
558 sg_list->count = 0;
559 retval = -ENOMEM;
560 goto out;
561 }
562 sg_init_table(sg_list->sg, sg_list->count);
563
564 for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++) {
565 address = (void *) get_zeroed_page(GFP_KERNEL);
566 if (address == NULL) {
567 sg_list->count = i;
568 zfcp_sg_list_free(sg_list);
569 retval = -ENOMEM;
570 goto out;
571 }
572 zfcp_address_to_sg(address, sg, min(size, PAGE_SIZE));
573 size -= sg->length;
574 }
575
576 out:
577 return retval;
578}
579
580
581/**
582 * zfcp_sg_list_free - free memory of a scatter-gather list
583 * @sg_list: structure describing a scatter-gather list
584 *
585 * Memory for each element in the scatter-gather list is freed.
586 * Finally sg_list->sg is freed itself and sg_list->count is reset.
587 */
588static void
589zfcp_sg_list_free(struct zfcp_sg_list *sg_list)
590{
591 struct scatterlist *sg;
592 unsigned int i;
593
594 BUG_ON(sg_list == NULL);
595
596 for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++)
597 free_page((unsigned long) zfcp_sg_to_address(sg));
598
599 sg_list->count = 0;
600 kfree(sg_list->sg);
601}
602
603/**
604 * zfcp_sg_size - determine size of a scatter-gather list
605 * @sg: array of (struct scatterlist)
606 * @sg_count: elements in array
607 * Return: size of entire scatter-gather list
608 */
609static size_t zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
610{
611 unsigned int i;
612 struct scatterlist *p;
613 size_t size;
614
615 size = 0;
616 for (i = 0, p = sg; i < sg_count; i++, p++) {
617 BUG_ON(p == NULL);
618 size += p->length;
619 }
620
621 return size;
622}
623
624
625/**
626 * zfcp_sg_list_copy_from_user -copy data from user space to scatter-gather list
627 * @sg_list: structure describing a scatter-gather list
628 * @user_buffer: pointer to buffer in user space
629 * @size: number of bytes to be copied
630 * Return: 0 on success, -EFAULT if copy_from_user fails.
631 */
632static int
633zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
634 void __user *user_buffer,
635 size_t size)
636{
637 struct scatterlist *sg;
638 unsigned int length;
639 void *zfcp_buffer;
640 int retval = 0;
641
642 BUG_ON(sg_list == NULL);
643
644 if (zfcp_sg_size(sg_list->sg, sg_list->count) < size)
645 return -EFAULT;
646
647 for (sg = sg_list->sg; size > 0; sg++) {
648 length = min((unsigned int)size, sg->length);
649 zfcp_buffer = zfcp_sg_to_address(sg);
650 if (copy_from_user(zfcp_buffer, user_buffer, length)) {
651 retval = -EFAULT;
652 goto out;
653 }
654 user_buffer += length;
655 size -= length;
656 }
657
658 out:
659 return retval;
660}
661
662
663/**
664 * zfcp_sg_list_copy_to_user - copy data from scatter-gather list to user space
665 * @user_buffer: pointer to buffer in user space
666 * @sg_list: structure describing a scatter-gather list
667 * @size: number of bytes to be copied
668 * Return: 0 on success, -EFAULT if copy_to_user fails
669 */
670static int
671zfcp_sg_list_copy_to_user(void __user *user_buffer,
672 struct zfcp_sg_list *sg_list,
673 size_t size)
674{
675 struct scatterlist *sg;
676 unsigned int length;
677 void *zfcp_buffer;
678 int retval = 0;
679
680 BUG_ON(sg_list == NULL);
681
682 if (zfcp_sg_size(sg_list->sg, sg_list->count) < size)
683 return -EFAULT;
684
685 for (sg = sg_list->sg; size > 0; sg++) {
686 length = min((unsigned int) size, sg->length);
687 zfcp_buffer = zfcp_sg_to_address(sg);
688 if (copy_to_user(user_buffer, zfcp_buffer, length)) {
689 retval = -EFAULT;
690 goto out;
691 }
692 user_buffer += length;
693 size -= length;
694 }
695
696 out:
697 return retval;
698}
699
700
701#undef ZFCP_LOG_AREA
702
703/****************************************************************/
704/****** Functions for configuration/set-up of structures ********/
705/****************************************************************/
706
707#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
708 226
709/** 227/**
710 * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN 228 * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
711 * @port: pointer to port to search for unit 229 * @port: pointer to port to search for unit
712 * @fcp_lun: FCP LUN to search for 230 * @fcp_lun: FCP LUN to search for
713 * Traverse list of all units of a port and return pointer to a unit 231 *
714 * with the given FCP LUN. 232 * Returns: pointer to zfcp_unit or NULL
715 */ 233 */
716struct zfcp_unit * 234struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port,
717zfcp_get_unit_by_lun(struct zfcp_port *port, fcp_lun_t fcp_lun) 235 fcp_lun_t fcp_lun)
718{ 236{
719 struct zfcp_unit *unit; 237 struct zfcp_unit *unit;
720 int found = 0;
721 238
722 list_for_each_entry(unit, &port->unit_list_head, list) { 239 list_for_each_entry(unit, &port->unit_list_head, list)
723 if ((unit->fcp_lun == fcp_lun) && 240 if ((unit->fcp_lun == fcp_lun) &&
724 !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status)) 241 !(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_REMOVE))
725 { 242 return unit;
726 found = 1; 243 return NULL;
727 break;
728 }
729 }
730 return found ? unit : NULL;
731} 244}
732 245
733/** 246/**
734 * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn 247 * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
735 * @adapter: pointer to adapter to search for port 248 * @adapter: pointer to adapter to search for port
736 * @wwpn: wwpn to search for 249 * @wwpn: wwpn to search for
737 * Traverse list of all ports of an adapter and return pointer to a port 250 *
738 * with the given wwpn. 251 * Returns: pointer to zfcp_port or NULL
739 */
740struct zfcp_port *
741zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, wwn_t wwpn)
742{
743 struct zfcp_port *port;
744 int found = 0;
745
746 list_for_each_entry(port, &adapter->port_list_head, list) {
747 if ((port->wwpn == wwpn) &&
748 !(atomic_read(&port->status) &
749 (ZFCP_STATUS_PORT_NO_WWPN | ZFCP_STATUS_COMMON_REMOVE))) {
750 found = 1;
751 break;
752 }
753 }
754 return found ? port : NULL;
755}
756
757/**
758 * zfcp_get_port_by_did - find port in port list of adapter by d_id
759 * @adapter: pointer to adapter to search for port
760 * @d_id: d_id to search for
761 * Traverse list of all ports of an adapter and return pointer to a port
762 * with the given d_id.
763 */ 252 */
764struct zfcp_port * 253struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
765zfcp_get_port_by_did(struct zfcp_adapter *adapter, u32 d_id) 254 wwn_t wwpn)
766{ 255{
767 struct zfcp_port *port; 256 struct zfcp_port *port;
768 int found = 0;
769 257
770 list_for_each_entry(port, &adapter->port_list_head, list) { 258 list_for_each_entry(port, &adapter->port_list_head, list)
771 if ((port->d_id == d_id) && 259 if ((port->wwpn == wwpn) && !(atomic_read(&port->status) &
772 !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) 260 (ZFCP_STATUS_PORT_NO_WWPN | ZFCP_STATUS_COMMON_REMOVE)))
773 { 261 return port;
774 found = 1; 262 return NULL;
775 break;
776 }
777 }
778 return found ? port : NULL;
779} 263}
780 264
781/** 265static void zfcp_sysfs_unit_release(struct device *dev)
782 * zfcp_get_adapter_by_busid - find adpater in adapter list by bus_id
783 * @bus_id: bus_id to search for
784 * Traverse list of all adapters and return pointer to an adapter
785 * with the given bus_id.
786 */
787struct zfcp_adapter *
788zfcp_get_adapter_by_busid(char *bus_id)
789{ 266{
790 struct zfcp_adapter *adapter; 267 kfree(container_of(dev, struct zfcp_unit, sysfs_device));
791 int found = 0;
792
793 list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list) {
794 if ((strncmp(bus_id, zfcp_get_busid_by_adapter(adapter),
795 BUS_ID_SIZE) == 0) &&
796 !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE,
797 &adapter->status)){
798 found = 1;
799 break;
800 }
801 }
802 return found ? adapter : NULL;
803} 268}
804 269
805/** 270/**
806 * zfcp_unit_enqueue - enqueue unit to unit list of a port. 271 * zfcp_unit_enqueue - enqueue unit to unit list of a port.
807 * @port: pointer to port where unit is added 272 * @port: pointer to port where unit is added
808 * @fcp_lun: FCP LUN of unit to be enqueued 273 * @fcp_lun: FCP LUN of unit to be enqueued
809 * Return: pointer to enqueued unit on success, NULL on error 274 * Returns: pointer to enqueued unit on success, ERR_PTR on error
810 * Locks: config_sema must be held to serialize changes to the unit list 275 * Locks: config_sema must be held to serialize changes to the unit list
811 * 276 *
812 * Sets up some unit internal structures and creates sysfs entry. 277 * Sets up some unit internal structures and creates sysfs entry.
813 */ 278 */
814struct zfcp_unit * 279struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
815zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
816{ 280{
817 struct zfcp_unit *unit; 281 struct zfcp_unit *unit;
818 282
819 /* 283 unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
820 * check that there is no unit with this FCP_LUN already in list
821 * and enqueue it.
822 * Note: Unlike for the adapter and the port, this is an error
823 */
824 read_lock_irq(&zfcp_data.config_lock);
825 unit = zfcp_get_unit_by_lun(port, fcp_lun);
826 read_unlock_irq(&zfcp_data.config_lock);
827 if (unit)
828 return NULL;
829
830 unit = kzalloc(sizeof (struct zfcp_unit), GFP_KERNEL);
831 if (!unit) 284 if (!unit)
832 return NULL; 285 return ERR_PTR(-ENOMEM);
833 286
834 /* initialise reference count stuff */
835 atomic_set(&unit->refcount, 0); 287 atomic_set(&unit->refcount, 0);
836 init_waitqueue_head(&unit->remove_wq); 288 init_waitqueue_head(&unit->remove_wq);
837 289
838 unit->port = port; 290 unit->port = port;
839 unit->fcp_lun = fcp_lun; 291 unit->fcp_lun = fcp_lun;
840 292
841 /* setup for sysfs registration */
842 snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx", fcp_lun); 293 snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx", fcp_lun);
843 unit->sysfs_device.parent = &port->sysfs_device; 294 unit->sysfs_device.parent = &port->sysfs_device;
844 unit->sysfs_device.release = zfcp_sysfs_unit_release; 295 unit->sysfs_device.release = zfcp_sysfs_unit_release;
@@ -847,14 +298,28 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
847 /* mark unit unusable as long as sysfs registration is not complete */ 298 /* mark unit unusable as long as sysfs registration is not complete */
848 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); 299 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
849 300
850 if (device_register(&unit->sysfs_device)) { 301 spin_lock_init(&unit->latencies.lock);
851 kfree(unit); 302 unit->latencies.write.channel.min = 0xFFFFFFFF;
852 return NULL; 303 unit->latencies.write.fabric.min = 0xFFFFFFFF;
304 unit->latencies.read.channel.min = 0xFFFFFFFF;
305 unit->latencies.read.fabric.min = 0xFFFFFFFF;
306 unit->latencies.cmd.channel.min = 0xFFFFFFFF;
307 unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
308
309 read_lock_irq(&zfcp_data.config_lock);
310 if (zfcp_get_unit_by_lun(port, fcp_lun)) {
311 read_unlock_irq(&zfcp_data.config_lock);
312 goto err_out_free;
853 } 313 }
314 read_unlock_irq(&zfcp_data.config_lock);
854 315
855 if (zfcp_sysfs_unit_create_files(&unit->sysfs_device)) { 316 if (device_register(&unit->sysfs_device))
317 goto err_out_free;
318
319 if (sysfs_create_group(&unit->sysfs_device.kobj,
320 &zfcp_sysfs_unit_attrs)) {
856 device_unregister(&unit->sysfs_device); 321 device_unregister(&unit->sysfs_device);
857 return NULL; 322 return ERR_PTR(-EIO);
858 } 323 }
859 324
860 zfcp_unit_get(unit); 325 zfcp_unit_get(unit);
@@ -864,16 +329,27 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
864 list_add_tail(&unit->list, &port->unit_list_head); 329 list_add_tail(&unit->list, &port->unit_list_head);
865 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); 330 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
866 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status); 331 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
332
867 write_unlock_irq(&zfcp_data.config_lock); 333 write_unlock_irq(&zfcp_data.config_lock);
868 334
869 port->units++; 335 port->units++;
870 zfcp_port_get(port); 336 zfcp_port_get(port);
871 337
872 return unit; 338 return unit;
339
340err_out_free:
341 kfree(unit);
342 return ERR_PTR(-EINVAL);
873} 343}
874 344
875void 345/**
876zfcp_unit_dequeue(struct zfcp_unit *unit) 346 * zfcp_unit_dequeue - dequeue unit
347 * @unit: pointer to zfcp_unit
348 *
349 * waits until all work is done on unit and removes it then from the unit->list
350 * of the associated port.
351 */
352void zfcp_unit_dequeue(struct zfcp_unit *unit)
877{ 353{
878 zfcp_unit_wait(unit); 354 zfcp_unit_wait(unit);
879 write_lock_irq(&zfcp_data.config_lock); 355 write_lock_irq(&zfcp_data.config_lock);
@@ -881,68 +357,51 @@ zfcp_unit_dequeue(struct zfcp_unit *unit)
881 write_unlock_irq(&zfcp_data.config_lock); 357 write_unlock_irq(&zfcp_data.config_lock);
882 unit->port->units--; 358 unit->port->units--;
883 zfcp_port_put(unit->port); 359 zfcp_port_put(unit->port);
884 zfcp_sysfs_unit_remove_files(&unit->sysfs_device); 360 sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs);
885 device_unregister(&unit->sysfs_device); 361 device_unregister(&unit->sysfs_device);
886} 362}
887 363
888/* 364static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
889 * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI
890 * commands.
891 * It also genrates fcp-nameserver request/response buffer and unsolicited
892 * status read fsf_req buffers.
893 *
894 * locks: must only be called with zfcp_data.config_sema taken
895 */
896static int
897zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
898{ 365{
366 /* must only be called with zfcp_data.config_sema taken */
899 adapter->pool.fsf_req_erp = 367 adapter->pool.fsf_req_erp =
900 mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_ERP_NR, 368 mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
901 zfcp_data.fsf_req_qtcb_cache);
902 if (!adapter->pool.fsf_req_erp) 369 if (!adapter->pool.fsf_req_erp)
903 return -ENOMEM; 370 return -ENOMEM;
904 371
905 adapter->pool.fsf_req_scsi = 372 adapter->pool.fsf_req_scsi =
906 mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_SCSI_NR, 373 mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
907 zfcp_data.fsf_req_qtcb_cache);
908 if (!adapter->pool.fsf_req_scsi) 374 if (!adapter->pool.fsf_req_scsi)
909 return -ENOMEM; 375 return -ENOMEM;
910 376
911 adapter->pool.fsf_req_abort = 377 adapter->pool.fsf_req_abort =
912 mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_ABORT_NR, 378 mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
913 zfcp_data.fsf_req_qtcb_cache);
914 if (!adapter->pool.fsf_req_abort) 379 if (!adapter->pool.fsf_req_abort)
915 return -ENOMEM; 380 return -ENOMEM;
916 381
917 adapter->pool.fsf_req_status_read = 382 adapter->pool.fsf_req_status_read =
918 mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR, 383 mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM,
919 sizeof(struct zfcp_fsf_req)); 384 sizeof(struct zfcp_fsf_req));
920 if (!adapter->pool.fsf_req_status_read) 385 if (!adapter->pool.fsf_req_status_read)
921 return -ENOMEM; 386 return -ENOMEM;
922 387
923 adapter->pool.data_status_read = 388 adapter->pool.data_status_read =
924 mempool_create_slab_pool(ZFCP_POOL_STATUS_READ_NR, 389 mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
925 zfcp_data.sr_buffer_cache); 390 zfcp_data.sr_buffer_cache);
926 if (!adapter->pool.data_status_read) 391 if (!adapter->pool.data_status_read)
927 return -ENOMEM; 392 return -ENOMEM;
928 393
929 adapter->pool.data_gid_pn = 394 adapter->pool.data_gid_pn =
930 mempool_create_slab_pool(ZFCP_POOL_DATA_GID_PN_NR, 395 mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
931 zfcp_data.gid_pn_cache);
932 if (!adapter->pool.data_gid_pn) 396 if (!adapter->pool.data_gid_pn)
933 return -ENOMEM; 397 return -ENOMEM;
934 398
935 return 0; 399 return 0;
936} 400}
937 401
938/** 402static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
939 * zfcp_free_low_mem_buffers - free memory pools of an adapter
940 * @adapter: pointer to zfcp_adapter for which memory pools should be freed
941 * locking: zfcp_data.config_sema must be held
942 */
943static void
944zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
945{ 403{
404 /* zfcp_data.config_sema must be held */
946 if (adapter->pool.fsf_req_erp) 405 if (adapter->pool.fsf_req_erp)
947 mempool_destroy(adapter->pool.fsf_req_erp); 406 mempool_destroy(adapter->pool.fsf_req_erp);
948 if (adapter->pool.fsf_req_scsi) 407 if (adapter->pool.fsf_req_scsi)
@@ -962,20 +421,61 @@ static void zfcp_dummy_release(struct device *dev)
962 return; 421 return;
963} 422}
964 423
965/* 424/**
425 * zfcp_status_read_refill - refill the long running status_read_requests
426 * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
427 *
428 * Returns: 0 on success, 1 otherwise
429 *
430 * if there are 16 or more status_read requests missing an adapter_reopen
431 * is triggered
432 */
433int zfcp_status_read_refill(struct zfcp_adapter *adapter)
434{
435 while (atomic_read(&adapter->stat_miss) > 0)
436 if (zfcp_fsf_status_read(adapter)) {
437 if (atomic_read(&adapter->stat_miss) >= 16) {
438 zfcp_erp_adapter_reopen(adapter, 0, 103, NULL);
439 return 1;
440 }
441 break;
442 } else
443 atomic_dec(&adapter->stat_miss);
444 return 0;
445}
446
447static void _zfcp_status_read_scheduler(struct work_struct *work)
448{
449 zfcp_status_read_refill(container_of(work, struct zfcp_adapter,
450 stat_work));
451}
452
453static int zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
454{
455 struct zfcp_port *port;
456
457 port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA,
458 ZFCP_DID_DIRECTORY_SERVICE);
459 if (IS_ERR(port))
460 return PTR_ERR(port);
461 zfcp_port_put(port);
462
463 return 0;
464}
465
466/**
467 * zfcp_adapter_enqueue - enqueue a new adapter to the list
468 * @ccw_device: pointer to the struct cc_device
469 *
470 * Returns: 0 if a new adapter was successfully enqueued
471 * -ENOMEM if alloc failed
966 * Enqueues an adapter at the end of the adapter list in the driver data. 472 * Enqueues an adapter at the end of the adapter list in the driver data.
967 * All adapter internal structures are set up. 473 * All adapter internal structures are set up.
968 * Proc-fs entries are also created. 474 * Proc-fs entries are also created.
969 *
970 * returns: 0 if a new adapter was successfully enqueued
971 * ZFCP_KNOWN if an adapter with this devno was already present
972 * -ENOMEM if alloc failed
973 * locks: config_sema must be held to serialise changes to the adapter list 475 * locks: config_sema must be held to serialise changes to the adapter list
974 */ 476 */
975struct zfcp_adapter * 477int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
976zfcp_adapter_enqueue(struct ccw_device *ccw_device)
977{ 478{
978 int retval = 0;
979 struct zfcp_adapter *adapter; 479 struct zfcp_adapter *adapter;
980 480
981 /* 481 /*
@@ -983,85 +483,58 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
983 * are protected by the config_sema, which must be held to get here 483 * are protected by the config_sema, which must be held to get here
984 */ 484 */
985 485
986 /* try to allocate new adapter data structure (zeroed) */ 486 adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
987 adapter = kzalloc(sizeof (struct zfcp_adapter), GFP_KERNEL); 487 if (!adapter)
988 if (!adapter) { 488 return -ENOMEM;
989 ZFCP_LOG_INFO("error: allocation of base adapter "
990 "structure failed\n");
991 goto out;
992 }
993 489
994 ccw_device->handler = NULL; 490 ccw_device->handler = NULL;
995
996 /* save ccw_device pointer */
997 adapter->ccw_device = ccw_device; 491 adapter->ccw_device = ccw_device;
492 atomic_set(&adapter->refcount, 0);
998 493
999 retval = zfcp_qdio_allocate_queues(adapter); 494 if (zfcp_qdio_allocate(adapter))
1000 if (retval)
1001 goto queues_alloc_failed;
1002
1003 retval = zfcp_qdio_allocate(adapter);
1004 if (retval)
1005 goto qdio_allocate_failed; 495 goto qdio_allocate_failed;
1006 496
1007 retval = zfcp_allocate_low_mem_buffers(adapter); 497 if (zfcp_allocate_low_mem_buffers(adapter))
1008 if (retval) {
1009 ZFCP_LOG_INFO("error: pool allocation failed\n");
1010 goto failed_low_mem_buffers; 498 goto failed_low_mem_buffers;
1011 }
1012 499
1013 /* initialise reference count stuff */ 500 if (zfcp_reqlist_alloc(adapter))
1014 atomic_set(&adapter->refcount, 0); 501 goto failed_low_mem_buffers;
502
503 if (zfcp_adapter_debug_register(adapter))
504 goto debug_register_failed;
505
1015 init_waitqueue_head(&adapter->remove_wq); 506 init_waitqueue_head(&adapter->remove_wq);
507 init_waitqueue_head(&adapter->erp_thread_wqh);
508 init_waitqueue_head(&adapter->erp_done_wqh);
1016 509
1017 /* initialise list of ports */
1018 INIT_LIST_HEAD(&adapter->port_list_head); 510 INIT_LIST_HEAD(&adapter->port_list_head);
1019
1020 /* initialise list of ports to be removed */
1021 INIT_LIST_HEAD(&adapter->port_remove_lh); 511 INIT_LIST_HEAD(&adapter->port_remove_lh);
512 INIT_LIST_HEAD(&adapter->erp_ready_head);
513 INIT_LIST_HEAD(&adapter->erp_running_head);
1022 514
1023 /* initialize list of fsf requests */
1024 spin_lock_init(&adapter->req_list_lock); 515 spin_lock_init(&adapter->req_list_lock);
1025 retval = zfcp_reqlist_alloc(adapter);
1026 if (retval) {
1027 ZFCP_LOG_INFO("request list initialization failed\n");
1028 goto failed_low_mem_buffers;
1029 }
1030
1031 /* initialize debug locks */
1032 516
1033 spin_lock_init(&adapter->hba_dbf_lock); 517 spin_lock_init(&adapter->hba_dbf_lock);
1034 spin_lock_init(&adapter->san_dbf_lock); 518 spin_lock_init(&adapter->san_dbf_lock);
1035 spin_lock_init(&adapter->scsi_dbf_lock); 519 spin_lock_init(&adapter->scsi_dbf_lock);
1036 spin_lock_init(&adapter->rec_dbf_lock); 520 spin_lock_init(&adapter->rec_dbf_lock);
1037 521 spin_lock_init(&adapter->req_q.lock);
1038 retval = zfcp_adapter_debug_register(adapter);
1039 if (retval)
1040 goto debug_register_failed;
1041
1042 /* initialize error recovery stuff */
1043 522
1044 rwlock_init(&adapter->erp_lock); 523 rwlock_init(&adapter->erp_lock);
1045 sema_init(&adapter->erp_ready_sem, 0);
1046 INIT_LIST_HEAD(&adapter->erp_ready_head);
1047 INIT_LIST_HEAD(&adapter->erp_running_head);
1048
1049 /* initialize abort lock */
1050 rwlock_init(&adapter->abort_lock); 524 rwlock_init(&adapter->abort_lock);
1051 525
1052 /* initialise some erp stuff */ 526 sema_init(&adapter->erp_ready_sem, 0);
1053 init_waitqueue_head(&adapter->erp_thread_wqh);
1054 init_waitqueue_head(&adapter->erp_done_wqh);
1055 527
1056 /* initialize lock of associated request queue */ 528 INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
1057 rwlock_init(&adapter->request_queue.queue_lock); 529 INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later);
1058 530
1059 /* mark adapter unusable as long as sysfs registration is not complete */ 531 /* mark adapter unusable as long as sysfs registration is not complete */
1060 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 532 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
1061 533
1062 dev_set_drvdata(&ccw_device->dev, adapter); 534 dev_set_drvdata(&ccw_device->dev, adapter);
1063 535
1064 if (zfcp_sysfs_adapter_create_files(&ccw_device->dev)) 536 if (sysfs_create_group(&ccw_device->dev.kobj,
537 &zfcp_sysfs_adapter_attrs))
1065 goto sysfs_failed; 538 goto sysfs_failed;
1066 539
1067 adapter->generic_services.parent = &adapter->ccw_device->dev; 540 adapter->generic_services.parent = &adapter->ccw_device->dev;
@@ -1072,7 +545,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
1072 if (device_register(&adapter->generic_services)) 545 if (device_register(&adapter->generic_services))
1073 goto generic_services_failed; 546 goto generic_services_failed;
1074 547
1075 /* put allocated adapter at list tail */
1076 write_lock_irq(&zfcp_data.config_lock); 548 write_lock_irq(&zfcp_data.config_lock);
1077 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 549 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
1078 list_add_tail(&adapter->list, &zfcp_data.adapter_list_head); 550 list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
@@ -1080,57 +552,49 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
1080 552
1081 zfcp_data.adapters++; 553 zfcp_data.adapters++;
1082 554
1083 goto out; 555 zfcp_nameserver_enqueue(adapter);
556
557 return 0;
1084 558
1085 generic_services_failed: 559generic_services_failed:
1086 zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); 560 sysfs_remove_group(&ccw_device->dev.kobj,
1087 sysfs_failed: 561 &zfcp_sysfs_adapter_attrs);
562sysfs_failed:
1088 zfcp_adapter_debug_unregister(adapter); 563 zfcp_adapter_debug_unregister(adapter);
1089 debug_register_failed: 564debug_register_failed:
1090 dev_set_drvdata(&ccw_device->dev, NULL); 565 dev_set_drvdata(&ccw_device->dev, NULL);
1091 zfcp_reqlist_free(adapter); 566 kfree(adapter->req_list);
1092 failed_low_mem_buffers: 567failed_low_mem_buffers:
1093 zfcp_free_low_mem_buffers(adapter); 568 zfcp_free_low_mem_buffers(adapter);
1094 if (qdio_free(ccw_device) != 0) 569qdio_allocate_failed:
1095 ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n", 570 zfcp_qdio_free(adapter);
1096 zfcp_get_busid_by_adapter(adapter));
1097 qdio_allocate_failed:
1098 zfcp_qdio_free_queues(adapter);
1099 queues_alloc_failed:
1100 kfree(adapter); 571 kfree(adapter);
1101 adapter = NULL; 572 return -ENOMEM;
1102 out:
1103 return adapter;
1104} 573}
1105 574
1106/* 575/**
1107 * returns: 0 - struct zfcp_adapter data structure successfully removed 576 * zfcp_adapter_dequeue - remove the adapter from the resource list
1108 * !0 - struct zfcp_adapter data structure could not be removed 577 * @adapter: pointer to struct zfcp_adapter which should be removed
1109 * (e.g. still used)
1110 * locks: adapter list write lock is assumed to be held by caller 578 * locks: adapter list write lock is assumed to be held by caller
1111 */ 579 */
1112void 580void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
1113zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
1114{ 581{
1115 int retval = 0; 582 int retval = 0;
1116 unsigned long flags; 583 unsigned long flags;
1117 584
585 cancel_work_sync(&adapter->scan_work);
586 cancel_work_sync(&adapter->stat_work);
1118 zfcp_adapter_scsi_unregister(adapter); 587 zfcp_adapter_scsi_unregister(adapter);
1119 device_unregister(&adapter->generic_services); 588 device_unregister(&adapter->generic_services);
1120 zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); 589 sysfs_remove_group(&adapter->ccw_device->dev.kobj,
590 &zfcp_sysfs_adapter_attrs);
1121 dev_set_drvdata(&adapter->ccw_device->dev, NULL); 591 dev_set_drvdata(&adapter->ccw_device->dev, NULL);
1122 /* sanity check: no pending FSF requests */ 592 /* sanity check: no pending FSF requests */
1123 spin_lock_irqsave(&adapter->req_list_lock, flags); 593 spin_lock_irqsave(&adapter->req_list_lock, flags);
1124 retval = zfcp_reqlist_isempty(adapter); 594 retval = zfcp_reqlist_isempty(adapter);
1125 spin_unlock_irqrestore(&adapter->req_list_lock, flags); 595 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
1126 if (!retval) { 596 if (!retval)
1127 ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, " 597 return;
1128 "%i requests outstanding\n",
1129 zfcp_get_busid_by_adapter(adapter), adapter,
1130 atomic_read(&adapter->reqs_active));
1131 retval = -EBUSY;
1132 goto out;
1133 }
1134 598
1135 zfcp_adapter_debug_unregister(adapter); 599 zfcp_adapter_debug_unregister(adapter);
1136 600
@@ -1142,26 +606,18 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
1142 /* decrease number of adapters in list */ 606 /* decrease number of adapters in list */
1143 zfcp_data.adapters--; 607 zfcp_data.adapters--;
1144 608
1145 ZFCP_LOG_TRACE("adapter %s (%p) removed from list, " 609 zfcp_qdio_free(adapter);
1146 "%i adapters still in list\n",
1147 zfcp_get_busid_by_adapter(adapter),
1148 adapter, zfcp_data.adapters);
1149
1150 retval = qdio_free(adapter->ccw_device);
1151 if (retval)
1152 ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n",
1153 zfcp_get_busid_by_adapter(adapter));
1154 610
1155 zfcp_free_low_mem_buffers(adapter); 611 zfcp_free_low_mem_buffers(adapter);
1156 /* free memory of adapter data structure and queues */ 612 kfree(adapter->req_list);
1157 zfcp_qdio_free_queues(adapter);
1158 zfcp_reqlist_free(adapter);
1159 kfree(adapter->fc_stats); 613 kfree(adapter->fc_stats);
1160 kfree(adapter->stats_reset_data); 614 kfree(adapter->stats_reset_data);
1161 ZFCP_LOG_TRACE("freeing adapter structure\n");
1162 kfree(adapter); 615 kfree(adapter);
1163 out: 616}
1164 return; 617
618static void zfcp_sysfs_port_release(struct device *dev)
619{
620 kfree(container_of(dev, struct zfcp_port, sysfs_device));
1165} 621}
1166 622
1167/** 623/**
@@ -1170,98 +626,90 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
1170 * @wwpn: WWPN of the remote port to be enqueued 626 * @wwpn: WWPN of the remote port to be enqueued
1171 * @status: initial status for the port 627 * @status: initial status for the port
1172 * @d_id: destination id of the remote port to be enqueued 628 * @d_id: destination id of the remote port to be enqueued
1173 * Return: pointer to enqueued port on success, NULL on error 629 * Returns: pointer to enqueued port on success, ERR_PTR on error
1174 * Locks: config_sema must be held to serialize changes to the port list 630 * Locks: config_sema must be held to serialize changes to the port list
1175 * 631 *
1176 * All port internal structures are set up and the sysfs entry is generated. 632 * All port internal structures are set up and the sysfs entry is generated.
1177 * d_id is used to enqueue ports with a well known address like the Directory 633 * d_id is used to enqueue ports with a well known address like the Directory
1178 * Service for nameserver lookup. 634 * Service for nameserver lookup.
1179 */ 635 */
1180struct zfcp_port * 636struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
1181zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, u32 status, 637 u32 status, u32 d_id)
1182 u32 d_id)
1183{ 638{
1184 struct zfcp_port *port; 639 struct zfcp_port *port;
1185 int check_wwpn; 640 int retval;
1186 641 char *bus_id;
1187 check_wwpn = !(status & ZFCP_STATUS_PORT_NO_WWPN);
1188 /*
1189 * check that there is no port with this WWPN already in list
1190 */
1191 if (check_wwpn) {
1192 read_lock_irq(&zfcp_data.config_lock);
1193 port = zfcp_get_port_by_wwpn(adapter, wwpn);
1194 read_unlock_irq(&zfcp_data.config_lock);
1195 if (port)
1196 return NULL;
1197 }
1198 642
1199 port = kzalloc(sizeof (struct zfcp_port), GFP_KERNEL); 643 port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
1200 if (!port) 644 if (!port)
1201 return NULL; 645 return ERR_PTR(-ENOMEM);
1202 646
1203 /* initialise reference count stuff */
1204 atomic_set(&port->refcount, 0);
1205 init_waitqueue_head(&port->remove_wq); 647 init_waitqueue_head(&port->remove_wq);
1206 648
1207 INIT_LIST_HEAD(&port->unit_list_head); 649 INIT_LIST_HEAD(&port->unit_list_head);
1208 INIT_LIST_HEAD(&port->unit_remove_lh); 650 INIT_LIST_HEAD(&port->unit_remove_lh);
1209 651
1210 port->adapter = adapter; 652 port->adapter = adapter;
653 port->d_id = d_id;
654 port->wwpn = wwpn;
1211 655
1212 if (check_wwpn) 656 /* mark port unusable as long as sysfs registration is not complete */
1213 port->wwpn = wwpn; 657 atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
1214 658 atomic_set(&port->refcount, 0);
1215 atomic_set_mask(status, &port->status);
1216 659
1217 /* setup for sysfs registration */
1218 if (status & ZFCP_STATUS_PORT_WKA) { 660 if (status & ZFCP_STATUS_PORT_WKA) {
1219 switch (d_id) { 661 switch (d_id) {
1220 case ZFCP_DID_DIRECTORY_SERVICE: 662 case ZFCP_DID_DIRECTORY_SERVICE:
1221 snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, 663 bus_id = "directory";
1222 "directory");
1223 break; 664 break;
1224 case ZFCP_DID_MANAGEMENT_SERVICE: 665 case ZFCP_DID_MANAGEMENT_SERVICE:
1225 snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, 666 bus_id = "management";
1226 "management");
1227 break; 667 break;
1228 case ZFCP_DID_KEY_DISTRIBUTION_SERVICE: 668 case ZFCP_DID_KEY_DISTRIBUTION_SERVICE:
1229 snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, 669 bus_id = "key_distribution";
1230 "key_distribution");
1231 break; 670 break;
1232 case ZFCP_DID_ALIAS_SERVICE: 671 case ZFCP_DID_ALIAS_SERVICE:
1233 snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, 672 bus_id = "alias";
1234 "alias");
1235 break; 673 break;
1236 case ZFCP_DID_TIME_SERVICE: 674 case ZFCP_DID_TIME_SERVICE:
1237 snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, 675 bus_id = "time";
1238 "time");
1239 break; 676 break;
1240 default: 677 default:
1241 kfree(port); 678 kfree(port);
1242 return NULL; 679 return ERR_PTR(-EINVAL);
1243 } 680 }
1244 port->d_id = d_id; 681 snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "%s", bus_id);
1245 port->sysfs_device.parent = &adapter->generic_services; 682 port->sysfs_device.parent = &adapter->generic_services;
1246 } else { 683 } else {
1247 snprintf(port->sysfs_device.bus_id, 684 snprintf(port->sysfs_device.bus_id,
1248 BUS_ID_SIZE, "0x%016llx", wwpn); 685 BUS_ID_SIZE, "0x%016llx", wwpn);
1249 port->sysfs_device.parent = &adapter->ccw_device->dev; 686 port->sysfs_device.parent = &adapter->ccw_device->dev;
1250 } 687 }
688
1251 port->sysfs_device.release = zfcp_sysfs_port_release; 689 port->sysfs_device.release = zfcp_sysfs_port_release;
1252 dev_set_drvdata(&port->sysfs_device, port); 690 dev_set_drvdata(&port->sysfs_device, port);
1253 691
1254 /* mark port unusable as long as sysfs registration is not complete */ 692 read_lock_irq(&zfcp_data.config_lock);
1255 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); 693 if (!(status & ZFCP_STATUS_PORT_NO_WWPN))
694 if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
695 read_unlock_irq(&zfcp_data.config_lock);
696 goto err_out_free;
697 }
698 read_unlock_irq(&zfcp_data.config_lock);
1256 699
1257 if (device_register(&port->sysfs_device)) { 700 if (device_register(&port->sysfs_device))
1258 kfree(port); 701 goto err_out_free;
1259 return NULL; 702
1260 } 703 if (status & ZFCP_STATUS_PORT_WKA)
704 retval = sysfs_create_group(&port->sysfs_device.kobj,
705 &zfcp_sysfs_ns_port_attrs);
706 else
707 retval = sysfs_create_group(&port->sysfs_device.kobj,
708 &zfcp_sysfs_port_attrs);
1261 709
1262 if (zfcp_sysfs_port_create_files(&port->sysfs_device, status)) { 710 if (retval) {
1263 device_unregister(&port->sysfs_device); 711 device_unregister(&port->sysfs_device);
1264 return NULL; 712 goto err_out;
1265 } 713 }
1266 714
1267 zfcp_port_get(port); 715 zfcp_port_get(port);
@@ -1274,15 +722,23 @@ zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, u32 status,
1274 if (!adapter->nameserver_port) 722 if (!adapter->nameserver_port)
1275 adapter->nameserver_port = port; 723 adapter->nameserver_port = port;
1276 adapter->ports++; 724 adapter->ports++;
725
1277 write_unlock_irq(&zfcp_data.config_lock); 726 write_unlock_irq(&zfcp_data.config_lock);
1278 727
1279 zfcp_adapter_get(adapter); 728 zfcp_adapter_get(adapter);
1280
1281 return port; 729 return port;
730
731err_out_free:
732 kfree(port);
733err_out:
734 return ERR_PTR(-EINVAL);
1282} 735}
1283 736
1284void 737/**
1285zfcp_port_dequeue(struct zfcp_port *port) 738 * zfcp_port_dequeue - dequeues a port from the port list of the adapter
739 * @port: pointer to struct zfcp_port which should be removed
740 */
741void zfcp_port_dequeue(struct zfcp_port *port)
1286{ 742{
1287 zfcp_port_wait(port); 743 zfcp_port_wait(port);
1288 write_lock_irq(&zfcp_data.config_lock); 744 write_lock_irq(&zfcp_data.config_lock);
@@ -1293,546 +749,53 @@ zfcp_port_dequeue(struct zfcp_port *port)
1293 fc_remote_port_delete(port->rport); 749 fc_remote_port_delete(port->rport);
1294 port->rport = NULL; 750 port->rport = NULL;
1295 zfcp_adapter_put(port->adapter); 751 zfcp_adapter_put(port->adapter);
1296 zfcp_sysfs_port_remove_files(&port->sysfs_device, 752 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)
1297 atomic_read(&port->status)); 753 sysfs_remove_group(&port->sysfs_device.kobj,
1298 device_unregister(&port->sysfs_device); 754 &zfcp_sysfs_ns_port_attrs);
1299}
1300
1301/* Enqueues a nameserver port */
1302int
1303zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
1304{
1305 struct zfcp_port *port;
1306
1307 port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA,
1308 ZFCP_DID_DIRECTORY_SERVICE);
1309 if (!port) {
1310 ZFCP_LOG_INFO("error: enqueue of nameserver port for "
1311 "adapter %s failed\n",
1312 zfcp_get_busid_by_adapter(adapter));
1313 return -ENXIO;
1314 }
1315 zfcp_port_put(port);
1316
1317 return 0;
1318}
1319
1320#undef ZFCP_LOG_AREA
1321
1322/****************************************************************/
1323/******* Fibre Channel Standard related Functions **************/
1324/****************************************************************/
1325
1326#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FC
1327
1328static void zfcp_fsf_incoming_els_rscn(struct zfcp_fsf_req *fsf_req)
1329{
1330 struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
1331 struct zfcp_adapter *adapter = fsf_req->adapter;
1332 struct fcp_rscn_head *fcp_rscn_head;
1333 struct fcp_rscn_element *fcp_rscn_element;
1334 struct zfcp_port *port;
1335 u16 i;
1336 u16 no_entries;
1337 u32 range_mask;
1338 unsigned long flags;
1339
1340 fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload;
1341 fcp_rscn_element = (struct fcp_rscn_element *) status_buffer->payload;
1342
1343 /* see FC-FS */
1344 no_entries = (fcp_rscn_head->payload_len / 4);
1345
1346 for (i = 1; i < no_entries; i++) {
1347 /* skip head and start with 1st element */
1348 fcp_rscn_element++;
1349 switch (fcp_rscn_element->addr_format) {
1350 case ZFCP_PORT_ADDRESS:
1351 range_mask = ZFCP_PORTS_RANGE_PORT;
1352 break;
1353 case ZFCP_AREA_ADDRESS:
1354 range_mask = ZFCP_PORTS_RANGE_AREA;
1355 break;
1356 case ZFCP_DOMAIN_ADDRESS:
1357 range_mask = ZFCP_PORTS_RANGE_DOMAIN;
1358 break;
1359 case ZFCP_FABRIC_ADDRESS:
1360 range_mask = ZFCP_PORTS_RANGE_FABRIC;
1361 break;
1362 default:
1363 ZFCP_LOG_INFO("incoming RSCN with unknown "
1364 "address format\n");
1365 continue;
1366 }
1367 read_lock_irqsave(&zfcp_data.config_lock, flags);
1368 list_for_each_entry(port, &adapter->port_list_head, list) {
1369 if (atomic_test_mask
1370 (ZFCP_STATUS_PORT_WKA, &port->status))
1371 continue;
1372 /* Do we know this port? If not skip it. */
1373 if (!atomic_test_mask
1374 (ZFCP_STATUS_PORT_DID_DID, &port->status)) {
1375 ZFCP_LOG_INFO("incoming RSCN, trying to open "
1376 "port 0x%016Lx\n", port->wwpn);
1377 zfcp_erp_port_reopen(port,
1378 ZFCP_STATUS_COMMON_ERP_FAILED,
1379 82, fsf_req);
1380 continue;
1381 }
1382
1383 /*
1384 * FIXME: race: d_id might being invalidated
1385 * (...DID_DID reset)
1386 */
1387 if ((port->d_id & range_mask)
1388 == (fcp_rscn_element->nport_did & range_mask)) {
1389 ZFCP_LOG_TRACE("reopen did 0x%08x\n",
1390 fcp_rscn_element->nport_did);
1391 /*
1392 * Unfortunately, an RSCN does not specify the
1393 * type of change a target underwent. We assume
1394 * that it makes sense to reopen the link.
1395 * FIXME: Shall we try to find out more about
1396 * the target and link state before closing it?
1397 * How to accomplish this? (nameserver?)
1398 * Where would such code be put in?
1399 * (inside or outside erp)
1400 */
1401 ZFCP_LOG_INFO("incoming RSCN, trying to open "
1402 "port 0x%016Lx\n", port->wwpn);
1403 zfcp_test_link(port);
1404 }
1405 }
1406 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1407 }
1408}
1409
1410static void zfcp_fsf_incoming_els_plogi(struct zfcp_fsf_req *fsf_req)
1411{
1412 struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
1413 struct zfcp_adapter *adapter = fsf_req->adapter;
1414 struct fsf_plogi *els_plogi;
1415 struct zfcp_port *port;
1416 unsigned long flags;
1417
1418 els_plogi = (struct fsf_plogi *) status_buffer->payload;
1419 read_lock_irqsave(&zfcp_data.config_lock, flags);
1420 list_for_each_entry(port, &adapter->port_list_head, list) {
1421 if (port->wwpn == (*(wwn_t *) &els_plogi->serv_param.wwpn))
1422 break;
1423 }
1424 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1425
1426 if (!port || (port->wwpn != (*(wwn_t *) &els_plogi->serv_param.wwpn))) {
1427 ZFCP_LOG_DEBUG("ignored incoming PLOGI for nonexisting port "
1428 "with d_id 0x%06x on adapter %s\n",
1429 status_buffer->d_id,
1430 zfcp_get_busid_by_adapter(adapter));
1431 } else {
1432 zfcp_erp_port_forced_reopen(port, 0, 83, fsf_req);
1433 }
1434}
1435
1436static void zfcp_fsf_incoming_els_logo(struct zfcp_fsf_req *fsf_req)
1437{
1438 struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data;
1439 struct zfcp_adapter *adapter = fsf_req->adapter;
1440 struct fcp_logo *els_logo = (struct fcp_logo *) status_buffer->payload;
1441 struct zfcp_port *port;
1442 unsigned long flags;
1443
1444 read_lock_irqsave(&zfcp_data.config_lock, flags);
1445 list_for_each_entry(port, &adapter->port_list_head, list) {
1446 if (port->wwpn == els_logo->nport_wwpn)
1447 break;
1448 }
1449 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1450
1451 if (!port || (port->wwpn != els_logo->nport_wwpn)) {
1452 ZFCP_LOG_DEBUG("ignored incoming LOGO for nonexisting port "
1453 "with d_id 0x%06x on adapter %s\n",
1454 status_buffer->d_id,
1455 zfcp_get_busid_by_adapter(adapter));
1456 } else {
1457 zfcp_erp_port_forced_reopen(port, 0, 84, fsf_req);
1458 }
1459}
1460
1461static void
1462zfcp_fsf_incoming_els_unknown(struct zfcp_adapter *adapter,
1463 struct fsf_status_read_buffer *status_buffer)
1464{
1465 ZFCP_LOG_NORMAL("warning: unknown incoming ELS 0x%08x "
1466 "for adapter %s\n", *(u32 *) (status_buffer->payload),
1467 zfcp_get_busid_by_adapter(adapter));
1468
1469}
1470
1471void
1472zfcp_fsf_incoming_els(struct zfcp_fsf_req *fsf_req)
1473{
1474 struct fsf_status_read_buffer *status_buffer;
1475 u32 els_type;
1476 struct zfcp_adapter *adapter;
1477
1478 status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
1479 els_type = *(u32 *) (status_buffer->payload);
1480 adapter = fsf_req->adapter;
1481
1482 zfcp_san_dbf_event_incoming_els(fsf_req);
1483 if (els_type == LS_PLOGI)
1484 zfcp_fsf_incoming_els_plogi(fsf_req);
1485 else if (els_type == LS_LOGO)
1486 zfcp_fsf_incoming_els_logo(fsf_req);
1487 else if ((els_type & 0xffff0000) == LS_RSCN)
1488 /* we are only concerned with the command, not the length */
1489 zfcp_fsf_incoming_els_rscn(fsf_req);
1490 else
1491 zfcp_fsf_incoming_els_unknown(adapter, status_buffer);
1492}
1493
1494
1495/**
1496 * zfcp_gid_pn_buffers_alloc - allocate buffers for GID_PN nameserver request
1497 * @gid_pn: pointer to return pointer to struct zfcp_gid_pn_data
1498 * @pool: pointer to mempool_t if non-null memory pool is used for allocation
1499 */
1500static int
1501zfcp_gid_pn_buffers_alloc(struct zfcp_gid_pn_data **gid_pn, mempool_t *pool)
1502{
1503 struct zfcp_gid_pn_data *data;
1504
1505 if (pool != NULL) {
1506 data = mempool_alloc(pool, GFP_ATOMIC);
1507 if (likely(data != NULL)) {
1508 data->ct.pool = pool;
1509 }
1510 } else {
1511 data = kmem_cache_alloc(zfcp_data.gid_pn_cache, GFP_ATOMIC);
1512 }
1513
1514 if (NULL == data)
1515 return -ENOMEM;
1516
1517 memset(data, 0, sizeof(*data));
1518 sg_init_table(&data->req , 1);
1519 sg_init_table(&data->resp , 1);
1520 data->ct.req = &data->req;
1521 data->ct.resp = &data->resp;
1522 data->ct.req_count = data->ct.resp_count = 1;
1523 zfcp_address_to_sg(&data->ct_iu_req, &data->req, sizeof(struct ct_iu_gid_pn_req));
1524 zfcp_address_to_sg(&data->ct_iu_resp, &data->resp, sizeof(struct ct_iu_gid_pn_resp));
1525
1526 *gid_pn = data;
1527 return 0;
1528}
1529
1530/**
1531 * zfcp_gid_pn_buffers_free - free buffers for GID_PN nameserver request
1532 * @gid_pn: pointer to struct zfcp_gid_pn_data which has to be freed
1533 */
1534static void zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn)
1535{
1536 if (gid_pn->ct.pool)
1537 mempool_free(gid_pn, gid_pn->ct.pool);
1538 else 755 else
1539 kmem_cache_free(zfcp_data.gid_pn_cache, gid_pn); 756 sysfs_remove_group(&port->sysfs_device.kobj,
1540} 757 &zfcp_sysfs_port_attrs);
1541 758 device_unregister(&port->sysfs_device);
1542/**
1543 * zfcp_ns_gid_pn_request - initiate GID_PN nameserver request
1544 * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
1545 */
1546int
1547zfcp_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
1548{
1549 int ret;
1550 struct ct_iu_gid_pn_req *ct_iu_req;
1551 struct zfcp_gid_pn_data *gid_pn;
1552 struct zfcp_adapter *adapter = erp_action->adapter;
1553
1554 ret = zfcp_gid_pn_buffers_alloc(&gid_pn, adapter->pool.data_gid_pn);
1555 if (ret < 0) {
1556 ZFCP_LOG_INFO("error: buffer allocation for gid_pn nameserver "
1557 "request failed for adapter %s\n",
1558 zfcp_get_busid_by_adapter(adapter));
1559 goto out;
1560 }
1561
1562 /* setup nameserver request */
1563 ct_iu_req = zfcp_sg_to_address(gid_pn->ct.req);
1564 ct_iu_req->header.revision = ZFCP_CT_REVISION;
1565 ct_iu_req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
1566 ct_iu_req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
1567 ct_iu_req->header.options = ZFCP_CT_SYNCHRONOUS;
1568 ct_iu_req->header.cmd_rsp_code = ZFCP_CT_GID_PN;
1569 ct_iu_req->header.max_res_size = ZFCP_CT_MAX_SIZE;
1570 ct_iu_req->wwpn = erp_action->port->wwpn;
1571
1572 /* setup parameters for send generic command */
1573 gid_pn->ct.port = adapter->nameserver_port;
1574 gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
1575 gid_pn->ct.handler_data = (unsigned long) gid_pn;
1576 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
1577 gid_pn->port = erp_action->port;
1578
1579 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
1580 erp_action);
1581 if (ret) {
1582 ZFCP_LOG_INFO("error: initiation of gid_pn nameserver request "
1583 "failed for adapter %s\n",
1584 zfcp_get_busid_by_adapter(adapter));
1585
1586 zfcp_gid_pn_buffers_free(gid_pn);
1587 }
1588
1589 out:
1590 return ret;
1591}
1592
1593/**
1594 * zfcp_ns_gid_pn_handler - handler for GID_PN nameserver request
1595 * @data: unsigned long, contains pointer to struct zfcp_gid_pn_data
1596 */
1597static void zfcp_ns_gid_pn_handler(unsigned long data)
1598{
1599 struct zfcp_port *port;
1600 struct zfcp_send_ct *ct;
1601 struct ct_iu_gid_pn_req *ct_iu_req;
1602 struct ct_iu_gid_pn_resp *ct_iu_resp;
1603 struct zfcp_gid_pn_data *gid_pn;
1604
1605
1606 gid_pn = (struct zfcp_gid_pn_data *) data;
1607 port = gid_pn->port;
1608 ct = &gid_pn->ct;
1609 ct_iu_req = zfcp_sg_to_address(ct->req);
1610 ct_iu_resp = zfcp_sg_to_address(ct->resp);
1611
1612 if (ct->status != 0)
1613 goto failed;
1614
1615 if (zfcp_check_ct_response(&ct_iu_resp->header)) {
1616 /* FIXME: do we need some specific erp entry points */
1617 atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
1618 goto failed;
1619 }
1620 /* paranoia */
1621 if (ct_iu_req->wwpn != port->wwpn) {
1622 ZFCP_LOG_NORMAL("bug: wwpn 0x%016Lx returned by nameserver "
1623 "lookup does not match expected wwpn 0x%016Lx "
1624 "for adapter %s\n", ct_iu_req->wwpn, port->wwpn,
1625 zfcp_get_busid_by_port(port));
1626 goto mismatch;
1627 }
1628
1629 /* looks like a valid d_id */
1630 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
1631 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
1632 ZFCP_LOG_DEBUG("adapter %s: wwpn=0x%016Lx ---> d_id=0x%06x\n",
1633 zfcp_get_busid_by_port(port), port->wwpn, port->d_id);
1634 goto out;
1635
1636 mismatch:
1637 ZFCP_LOG_DEBUG("CT IUs do not match:\n");
1638 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, (char *) ct_iu_req,
1639 sizeof(struct ct_iu_gid_pn_req));
1640 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, (char *) ct_iu_resp,
1641 sizeof(struct ct_iu_gid_pn_resp));
1642
1643 failed:
1644 ZFCP_LOG_NORMAL("warning: failed gid_pn nameserver request for wwpn "
1645 "0x%016Lx for adapter %s\n",
1646 port->wwpn, zfcp_get_busid_by_port(port));
1647 out:
1648 zfcp_gid_pn_buffers_free(gid_pn);
1649 return;
1650} 759}
1651 760
1652/* reject CT_IU reason codes acc. to FC-GS-4 */
1653static const struct zfcp_rc_entry zfcp_ct_rc[] = {
1654 {0x01, "invalid command code"},
1655 {0x02, "invalid version level"},
1656 {0x03, "logical error"},
1657 {0x04, "invalid CT_IU size"},
1658 {0x05, "logical busy"},
1659 {0x07, "protocol error"},
1660 {0x09, "unable to perform command request"},
1661 {0x0b, "command not supported"},
1662 {0x0d, "server not available"},
1663 {0x0e, "session could not be established"},
1664 {0xff, "vendor specific error"},
1665 {0, NULL},
1666};
1667
1668/* LS_RJT reason codes acc. to FC-FS */
1669static const struct zfcp_rc_entry zfcp_ls_rjt_rc[] = {
1670 {0x01, "invalid LS_Command code"},
1671 {0x03, "logical error"},
1672 {0x05, "logical busy"},
1673 {0x07, "protocol error"},
1674 {0x09, "unable to perform command request"},
1675 {0x0b, "command not supported"},
1676 {0x0e, "command already in progress"},
1677 {0xff, "vendor specific error"},
1678 {0, NULL},
1679};
1680
1681/* reject reason codes according to FC-PH/FC-FS */
1682static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = {
1683 {0x01, "invalid D_ID"},
1684 {0x02, "invalid S_ID"},
1685 {0x03, "Nx_Port not available, temporary"},
1686 {0x04, "Nx_Port not available, permament"},
1687 {0x05, "class not supported"},
1688 {0x06, "delimiter usage error"},
1689 {0x07, "TYPE not supported"},
1690 {0x08, "invalid Link_Control"},
1691 {0x09, "invalid R_CTL field"},
1692 {0x0a, "invalid F_CTL field"},
1693 {0x0b, "invalid OX_ID"},
1694 {0x0c, "invalid RX_ID"},
1695 {0x0d, "invalid SEQ_ID"},
1696 {0x0e, "invalid DF_CTL"},
1697 {0x0f, "invalid SEQ_CNT"},
1698 {0x10, "invalid parameter field"},
1699 {0x11, "exchange error"},
1700 {0x12, "protocol error"},
1701 {0x13, "incorrect length"},
1702 {0x14, "unsupported ACK"},
1703 {0x15, "class of service not supported by entity at FFFFFE"},
1704 {0x16, "login required"},
1705 {0x17, "excessive sequences attempted"},
1706 {0x18, "unable to establish exchange"},
1707 {0x1a, "fabric path not available"},
1708 {0x1b, "invalid VC_ID (class 4)"},
1709 {0x1c, "invalid CS_CTL field"},
1710 {0x1d, "insufficient resources for VC (class 4)"},
1711 {0x1f, "invalid class of service"},
1712 {0x20, "preemption request rejected"},
1713 {0x21, "preemption not enabled"},
1714 {0x22, "multicast error"},
1715 {0x23, "multicast error terminate"},
1716 {0x24, "process login required"},
1717 {0xff, "vendor specific reject"},
1718 {0, NULL},
1719};
1720
1721/** 761/**
1722 * zfcp_rc_description - return description for given reaon code 762 * zfcp_sg_free_table - free memory used by scatterlists
1723 * @code: reason code 763 * @sg: pointer to scatterlist
1724 * @rc_table: table of reason codes and descriptions 764 * @count: number of scatterlist which are to be free'ed
765 * the scatterlist are expected to reference pages always
1725 */ 766 */
1726static const char * 767void zfcp_sg_free_table(struct scatterlist *sg, int count)
1727zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table)
1728{ 768{
1729 const char *descr = "unknown reason code"; 769 int i;
1730 770
1731 do { 771 for (i = 0; i < count; i++, sg++)
1732 if (code == rc_table->code) { 772 if (sg)
1733 descr = rc_table->description; 773 free_page((unsigned long) sg_virt(sg));
774 else
1734 break; 775 break;
1735 }
1736 rc_table++;
1737 } while (rc_table->code && rc_table->description);
1738
1739 return descr;
1740} 776}
1741 777
1742/** 778/**
1743 * zfcp_check_ct_response - evaluate reason code for CT_IU 779 * zfcp_sg_setup_table - init scatterlist and allocate, assign buffers
1744 * @rjt: response payload to an CT_IU request 780 * @sg: pointer to struct scatterlist
1745 * Return: 0 for accept CT_IU, 1 for reject CT_IU or invlid response code 781 * @count: number of scatterlists which should be assigned with buffers
782 * of size page
783 *
784 * Returns: 0 on success, -ENOMEM otherwise
1746 */ 785 */
1747int 786int zfcp_sg_setup_table(struct scatterlist *sg, int count)
1748zfcp_check_ct_response(struct ct_hdr *rjt)
1749{ 787{
1750 if (rjt->cmd_rsp_code == ZFCP_CT_ACCEPT) 788 void *addr;
1751 return 0; 789 int i;
1752 790
1753 if (rjt->cmd_rsp_code != ZFCP_CT_REJECT) { 791 sg_init_table(sg, count);
1754 ZFCP_LOG_NORMAL("error: invalid Generic Service command/" 792 for (i = 0; i < count; i++, sg++) {
1755 "response code (0x%04hx)\n", 793 addr = (void *) get_zeroed_page(GFP_KERNEL);
1756 rjt->cmd_rsp_code); 794 if (!addr) {
1757 return 1; 795 zfcp_sg_free_table(sg, i);
796 return -ENOMEM;
797 }
798 sg_set_buf(sg, addr, PAGE_SIZE);
1758 } 799 }
1759 800 return 0;
1760 ZFCP_LOG_INFO("Generic Service command rejected\n");
1761 ZFCP_LOG_INFO("%s (0x%02x, 0x%02x, 0x%02x)\n",
1762 zfcp_rc_description(rjt->reason_code, zfcp_ct_rc),
1763 (u32) rjt->reason_code, (u32) rjt->reason_code_expl,
1764 (u32) rjt->vendor_unique);
1765
1766 return 1;
1767}
1768
1769/**
1770 * zfcp_print_els_rjt - print reject parameter and description for ELS reject
1771 * @rjt_par: reject parameter acc. to FC-PH/FC-FS
1772 * @rc_table: table of reason codes and descriptions
1773 */
1774static void
1775zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par,
1776 const struct zfcp_rc_entry *rc_table)
1777{
1778 ZFCP_LOG_INFO("%s (%02x %02x %02x %02x)\n",
1779 zfcp_rc_description(rjt_par->reason_code, rc_table),
1780 (u32) rjt_par->action, (u32) rjt_par->reason_code,
1781 (u32) rjt_par->reason_expl, (u32) rjt_par->vendor_unique);
1782}
1783
1784/**
1785 * zfcp_fsf_handle_els_rjt - evaluate status qualifier/reason code on ELS reject
1786 * @sq: status qualifier word
1787 * @rjt_par: reject parameter as described in FC-PH and FC-FS
1788 * Return: -EROMTEIO for LS_RJT, -EREMCHG for invalid D_ID, -EIO else
1789 */
1790int
1791zfcp_handle_els_rjt(u32 sq, struct zfcp_ls_rjt_par *rjt_par)
1792{
1793 int ret = -EIO;
1794
1795 if (sq == FSF_IOSTAT_NPORT_RJT) {
1796 ZFCP_LOG_INFO("ELS rejected (P_RJT)\n");
1797 zfcp_print_els_rjt(rjt_par, zfcp_p_rjt_rc);
1798 /* invalid d_id */
1799 if (rjt_par->reason_code == 0x01)
1800 ret = -EREMCHG;
1801 } else if (sq == FSF_IOSTAT_FABRIC_RJT) {
1802 ZFCP_LOG_INFO("ELS rejected (F_RJT)\n");
1803 zfcp_print_els_rjt(rjt_par, zfcp_p_rjt_rc);
1804 /* invalid d_id */
1805 if (rjt_par->reason_code == 0x01)
1806 ret = -EREMCHG;
1807 } else if (sq == FSF_IOSTAT_LS_RJT) {
1808 ZFCP_LOG_INFO("ELS rejected (LS_RJT)\n");
1809 zfcp_print_els_rjt(rjt_par, zfcp_ls_rjt_rc);
1810 ret = -EREMOTEIO;
1811 } else
1812 ZFCP_LOG_INFO("unexpected SQ: 0x%02x\n", sq);
1813
1814 return ret;
1815}
1816
1817/**
1818 * zfcp_plogi_evaluate - evaluate PLOGI playload and copy important fields
1819 * into zfcp_port structure
1820 * @port: zfcp_port structure
1821 * @plogi: plogi payload
1822 */
1823void
1824zfcp_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi)
1825{
1826 port->maxframe_size = plogi->serv_param.common_serv_param[7] |
1827 ((plogi->serv_param.common_serv_param[6] & 0x0F) << 8);
1828 if (plogi->serv_param.class1_serv_param[0] & 0x80)
1829 port->supported_classes |= FC_COS_CLASS1;
1830 if (plogi->serv_param.class2_serv_param[0] & 0x80)
1831 port->supported_classes |= FC_COS_CLASS2;
1832 if (plogi->serv_param.class3_serv_param[0] & 0x80)
1833 port->supported_classes |= FC_COS_CLASS3;
1834 if (plogi->serv_param.class4_serv_param[0] & 0x80)
1835 port->supported_classes |= FC_COS_CLASS4;
1836} 801}
1837
1838#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 66d3b88844b0..391dd29749f8 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -1,64 +1,13 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * Registration and callback for the s390 common I/O layer.
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22#include "zfcp_ext.h" 9#include "zfcp_ext.h"
23 10
24#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
25
26static int zfcp_ccw_probe(struct ccw_device *);
27static void zfcp_ccw_remove(struct ccw_device *);
28static int zfcp_ccw_set_online(struct ccw_device *);
29static int zfcp_ccw_set_offline(struct ccw_device *);
30static int zfcp_ccw_notify(struct ccw_device *, int);
31static void zfcp_ccw_shutdown(struct ccw_device *);
32
33static struct ccw_device_id zfcp_ccw_device_id[] = {
34 {CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
35 ZFCP_CONTROL_UNIT_MODEL,
36 ZFCP_DEVICE_TYPE,
37 ZFCP_DEVICE_MODEL)},
38 {CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
39 ZFCP_CONTROL_UNIT_MODEL,
40 ZFCP_DEVICE_TYPE,
41 ZFCP_DEVICE_MODEL_PRIV)},
42 {},
43};
44
45static struct ccw_driver zfcp_ccw_driver = {
46 .owner = THIS_MODULE,
47 .name = ZFCP_NAME,
48 .ids = zfcp_ccw_device_id,
49 .probe = zfcp_ccw_probe,
50 .remove = zfcp_ccw_remove,
51 .set_online = zfcp_ccw_set_online,
52 .set_offline = zfcp_ccw_set_offline,
53 .notify = zfcp_ccw_notify,
54 .shutdown = zfcp_ccw_shutdown,
55 .driver = {
56 .groups = zfcp_driver_attr_groups,
57 },
58};
59
60MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
61
62/** 11/**
63 * zfcp_ccw_probe - probe function of zfcp driver 12 * zfcp_ccw_probe - probe function of zfcp driver
64 * @ccw_device: pointer to belonging ccw device 13 * @ccw_device: pointer to belonging ccw device
@@ -69,19 +18,16 @@ MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
69 * In addition the nameserver port will be added to the ports of the adapter 18 * In addition the nameserver port will be added to the ports of the adapter
70 * and its sysfs representation will be created too. 19 * and its sysfs representation will be created too.
71 */ 20 */
72static int 21static int zfcp_ccw_probe(struct ccw_device *ccw_device)
73zfcp_ccw_probe(struct ccw_device *ccw_device)
74{ 22{
75 struct zfcp_adapter *adapter;
76 int retval = 0; 23 int retval = 0;
77 24
78 down(&zfcp_data.config_sema); 25 down(&zfcp_data.config_sema);
79 adapter = zfcp_adapter_enqueue(ccw_device); 26 if (zfcp_adapter_enqueue(ccw_device)) {
80 if (!adapter) 27 dev_err(&ccw_device->dev,
28 "Setup of data structures failed.\n");
81 retval = -EINVAL; 29 retval = -EINVAL;
82 else 30 }
83 ZFCP_LOG_DEBUG("Probed adapter %s\n",
84 zfcp_get_busid_by_adapter(adapter));
85 up(&zfcp_data.config_sema); 31 up(&zfcp_data.config_sema);
86 return retval; 32 return retval;
87} 33}
@@ -95,8 +41,7 @@ zfcp_ccw_probe(struct ccw_device *ccw_device)
95 * ports that belong to this adapter. And in addition all resources of this 41 * ports that belong to this adapter. And in addition all resources of this
96 * adapter will be freed too. 42 * adapter will be freed too.
97 */ 43 */
98static void 44static void zfcp_ccw_remove(struct ccw_device *ccw_device)
99zfcp_ccw_remove(struct ccw_device *ccw_device)
100{ 45{
101 struct zfcp_adapter *adapter; 46 struct zfcp_adapter *adapter;
102 struct zfcp_port *port, *p; 47 struct zfcp_port *port, *p;
@@ -106,8 +51,6 @@ zfcp_ccw_remove(struct ccw_device *ccw_device)
106 down(&zfcp_data.config_sema); 51 down(&zfcp_data.config_sema);
107 adapter = dev_get_drvdata(&ccw_device->dev); 52 adapter = dev_get_drvdata(&ccw_device->dev);
108 53
109 ZFCP_LOG_DEBUG("Removing adapter %s\n",
110 zfcp_get_busid_by_adapter(adapter));
111 write_lock_irq(&zfcp_data.config_lock); 54 write_lock_irq(&zfcp_data.config_lock);
112 list_for_each_entry_safe(port, p, &adapter->port_list_head, list) { 55 list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
113 list_for_each_entry_safe(unit, u, &port->unit_list_head, list) { 56 list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
@@ -145,8 +88,7 @@ zfcp_ccw_remove(struct ccw_device *ccw_device)
145 * registered with the SCSI stack, that the QDIO queues will be set up 88 * registered with the SCSI stack, that the QDIO queues will be set up
146 * and that the adapter will be opened (asynchronously). 89 * and that the adapter will be opened (asynchronously).
147 */ 90 */
148static int 91static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
149zfcp_ccw_set_online(struct ccw_device *ccw_device)
150{ 92{
151 struct zfcp_adapter *adapter; 93 struct zfcp_adapter *adapter;
152 int retval; 94 int retval;
@@ -155,12 +97,8 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
155 adapter = dev_get_drvdata(&ccw_device->dev); 97 adapter = dev_get_drvdata(&ccw_device->dev);
156 98
157 retval = zfcp_erp_thread_setup(adapter); 99 retval = zfcp_erp_thread_setup(adapter);
158 if (retval) { 100 if (retval)
159 ZFCP_LOG_INFO("error: start of error recovery thread for "
160 "adapter %s failed\n",
161 zfcp_get_busid_by_adapter(adapter));
162 goto out; 101 goto out;
163 }
164 102
165 retval = zfcp_adapter_scsi_register(adapter); 103 retval = zfcp_adapter_scsi_register(adapter);
166 if (retval) 104 if (retval)
@@ -191,8 +129,7 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
191 * This function gets called by the common i/o layer and sets an adapter 129 * This function gets called by the common i/o layer and sets an adapter
192 * into state offline. 130 * into state offline.
193 */ 131 */
194static int 132static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
195zfcp_ccw_set_offline(struct ccw_device *ccw_device)
196{ 133{
197 struct zfcp_adapter *adapter; 134 struct zfcp_adapter *adapter;
198 135
@@ -206,15 +143,14 @@ zfcp_ccw_set_offline(struct ccw_device *ccw_device)
206} 143}
207 144
208/** 145/**
209 * zfcp_ccw_notify 146 * zfcp_ccw_notify - ccw notify function
210 * @ccw_device: pointer to belonging ccw device 147 * @ccw_device: pointer to belonging ccw device
211 * @event: indicates if adapter was detached or attached 148 * @event: indicates if adapter was detached or attached
212 * 149 *
213 * This function gets called by the common i/o layer if an adapter has gone 150 * This function gets called by the common i/o layer if an adapter has gone
214 * or reappeared. 151 * or reappeared.
215 */ 152 */
216static int 153static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
217zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
218{ 154{
219 struct zfcp_adapter *adapter; 155 struct zfcp_adapter *adapter;
220 156
@@ -222,18 +158,15 @@ zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
222 adapter = dev_get_drvdata(&ccw_device->dev); 158 adapter = dev_get_drvdata(&ccw_device->dev);
223 switch (event) { 159 switch (event) {
224 case CIO_GONE: 160 case CIO_GONE:
225 ZFCP_LOG_NORMAL("adapter %s: device gone\n", 161 dev_warn(&adapter->ccw_device->dev, "device gone\n");
226 zfcp_get_busid_by_adapter(adapter));
227 zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL); 162 zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL);
228 break; 163 break;
229 case CIO_NO_PATH: 164 case CIO_NO_PATH:
230 ZFCP_LOG_NORMAL("adapter %s: no path\n", 165 dev_warn(&adapter->ccw_device->dev, "no path\n");
231 zfcp_get_busid_by_adapter(adapter));
232 zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL); 166 zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL);
233 break; 167 break;
234 case CIO_OPER: 168 case CIO_OPER:
235 ZFCP_LOG_NORMAL("adapter %s: operational again\n", 169 dev_info(&adapter->ccw_device->dev, "operational again\n");
236 zfcp_get_busid_by_adapter(adapter));
237 zfcp_erp_modify_adapter_status(adapter, 11, NULL, 170 zfcp_erp_modify_adapter_status(adapter, 11, NULL,
238 ZFCP_STATUS_COMMON_RUNNING, 171 ZFCP_STATUS_COMMON_RUNNING,
239 ZFCP_SET); 172 ZFCP_SET);
@@ -247,24 +180,10 @@ zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
247} 180}
248 181
249/** 182/**
250 * zfcp_ccw_register - ccw register function 183 * zfcp_ccw_shutdown - handle shutdown from cio
251 * 184 * @cdev: device for adapter to shutdown.
252 * Registers the driver at the common i/o layer. This function will be called
253 * at module load time/system start.
254 */
255int __init
256zfcp_ccw_register(void)
257{
258 return ccw_driver_register(&zfcp_ccw_driver);
259}
260
261/**
262 * zfcp_ccw_shutdown - gets called on reboot/shutdown
263 *
264 * Makes sure that QDIO queues are down when the system gets stopped.
265 */ 185 */
266static void 186static void zfcp_ccw_shutdown(struct ccw_device *cdev)
267zfcp_ccw_shutdown(struct ccw_device *cdev)
268{ 187{
269 struct zfcp_adapter *adapter; 188 struct zfcp_adapter *adapter;
270 189
@@ -275,4 +194,33 @@ zfcp_ccw_shutdown(struct ccw_device *cdev)
275 up(&zfcp_data.config_sema); 194 up(&zfcp_data.config_sema);
276} 195}
277 196
278#undef ZFCP_LOG_AREA 197static struct ccw_device_id zfcp_ccw_device_id[] = {
198 { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
199 { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. */
200 {},
201};
202
203MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
204
205static struct ccw_driver zfcp_ccw_driver = {
206 .owner = THIS_MODULE,
207 .name = "zfcp",
208 .ids = zfcp_ccw_device_id,
209 .probe = zfcp_ccw_probe,
210 .remove = zfcp_ccw_remove,
211 .set_online = zfcp_ccw_set_online,
212 .set_offline = zfcp_ccw_set_offline,
213 .notify = zfcp_ccw_notify,
214 .shutdown = zfcp_ccw_shutdown,
215};
216
217/**
218 * zfcp_ccw_register - ccw register function
219 *
220 * Registers the driver at the common i/o layer. This function will be called
221 * at module load time/system start.
222 */
223int __init zfcp_ccw_register(void)
224{
225 return ccw_driver_register(&zfcp_ccw_driver);
226}
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
new file mode 100644
index 000000000000..ec2abceca6dc
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -0,0 +1,259 @@
1/*
2 * zfcp device driver
3 *
4 * Userspace interface for accessing the
5 * Access Control Lists / Control File Data Channel
6 *
7 * Copyright IBM Corporation 2008
8 */
9
10#include <linux/types.h>
11#include <linux/miscdevice.h>
12#include <asm/ccwdev.h>
13#include "zfcp_def.h"
14#include "zfcp_ext.h"
15#include "zfcp_fsf.h"
16
17#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001
18#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101
19#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201
20#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401
21#define ZFCP_CFDC_CMND_UPLOAD 0x00010002
22
23#define ZFCP_CFDC_DOWNLOAD 0x00000001
24#define ZFCP_CFDC_UPLOAD 0x00000002
25#define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000
26
27#define ZFCP_CFDC_IOC_MAGIC 0xDD
28#define ZFCP_CFDC_IOC \
29 _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_data)
30
31/**
32 * struct zfcp_cfdc_data - data for ioctl cfdc interface
33 * @signature: request signature
34 * @devno: FCP adapter device number
35 * @command: command code
36 * @fsf_status: returns status of FSF command to userspace
37 * @fsf_status_qual: returned to userspace
38 * @payloads: access conflicts list
39 * @control_file: access control table
40 */
41struct zfcp_cfdc_data {
42 u32 signature;
43 u32 devno;
44 u32 command;
45 u32 fsf_status;
46 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
47 u8 payloads[256];
48 u8 control_file[0];
49};
50
51static int zfcp_cfdc_copy_from_user(struct scatterlist *sg,
52 void __user *user_buffer)
53{
54 unsigned int length;
55 unsigned int size = ZFCP_CFDC_MAX_SIZE;
56
57 while (size) {
58 length = min((unsigned int)size, sg->length);
59 if (copy_from_user(sg_virt(sg++), user_buffer, length))
60 return -EFAULT;
61 user_buffer += length;
62 size -= length;
63 }
64 return 0;
65}
66
67static int zfcp_cfdc_copy_to_user(void __user *user_buffer,
68 struct scatterlist *sg)
69{
70 unsigned int length;
71 unsigned int size = ZFCP_CFDC_MAX_SIZE;
72
73 while (size) {
74 length = min((unsigned int) size, sg->length);
75 if (copy_to_user(user_buffer, sg_virt(sg++), length))
76 return -EFAULT;
77 user_buffer += length;
78 size -= length;
79 }
80 return 0;
81}
82
83static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
84{
85 struct zfcp_adapter *adapter = NULL, *cur_adapter;
86 struct ccw_dev_id dev_id;
87
88 read_lock_irq(&zfcp_data.config_lock);
89 list_for_each_entry(cur_adapter, &zfcp_data.adapter_list_head, list) {
90 ccw_device_get_id(cur_adapter->ccw_device, &dev_id);
91 if (dev_id.devno == devno) {
92 adapter = cur_adapter;
93 zfcp_adapter_get(adapter);
94 break;
95 }
96 }
97 read_unlock_irq(&zfcp_data.config_lock);
98 return adapter;
99}
100
101static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)
102{
103 switch (command) {
104 case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
105 fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
106 fsf_cfdc->option = FSF_CFDC_OPTION_NORMAL_MODE;
107 break;
108 case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
109 fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
110 fsf_cfdc->option = FSF_CFDC_OPTION_FORCE;
111 break;
112 case ZFCP_CFDC_CMND_FULL_ACCESS:
113 fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
114 fsf_cfdc->option = FSF_CFDC_OPTION_FULL_ACCESS;
115 break;
116 case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
117 fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
118 fsf_cfdc->option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
119 break;
120 case ZFCP_CFDC_CMND_UPLOAD:
121 fsf_cfdc->command = FSF_QTCB_UPLOAD_CONTROL_FILE;
122 fsf_cfdc->option = 0;
123 break;
124 default:
125 return -EINVAL;
126 }
127
128 return 0;
129}
130
131static int zfcp_cfdc_sg_setup(int command, struct scatterlist *sg,
132 u8 __user *control_file)
133{
134 int retval;
135 retval = zfcp_sg_setup_table(sg, ZFCP_CFDC_PAGES);
136 if (retval)
137 return retval;
138
139 sg[ZFCP_CFDC_PAGES - 1].length = ZFCP_CFDC_MAX_SIZE % PAGE_SIZE;
140
141 if (command & ZFCP_CFDC_WITH_CONTROL_FILE &&
142 command & ZFCP_CFDC_DOWNLOAD) {
143 retval = zfcp_cfdc_copy_from_user(sg, control_file);
144 if (retval) {
145 zfcp_sg_free_table(sg, ZFCP_CFDC_PAGES);
146 return -EFAULT;
147 }
148 }
149
150 return 0;
151}
152
153static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data,
154 struct zfcp_fsf_req *req)
155{
156 data->fsf_status = req->qtcb->header.fsf_status;
157 memcpy(&data->fsf_status_qual, &req->qtcb->header.fsf_status_qual,
158 sizeof(union fsf_status_qual));
159 memcpy(&data->payloads, &req->qtcb->bottom.support.els,
160 sizeof(req->qtcb->bottom.support.els));
161}
162
163static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
164 unsigned long buffer)
165{
166 struct zfcp_cfdc_data *data;
167 struct zfcp_cfdc_data __user *data_user;
168 struct zfcp_adapter *adapter;
169 struct zfcp_fsf_req *req;
170 struct zfcp_fsf_cfdc *fsf_cfdc;
171 int retval;
172
173 if (command != ZFCP_CFDC_IOC)
174 return -ENOTTY;
175
176 data_user = (void __user *) buffer;
177 if (!data_user)
178 return -EINVAL;
179
180 fsf_cfdc = kmalloc(sizeof(struct zfcp_fsf_cfdc), GFP_KERNEL);
181 if (!fsf_cfdc)
182 return -ENOMEM;
183
184 data = kmalloc(sizeof(struct zfcp_cfdc_data), GFP_KERNEL);
185 if (!data) {
186 retval = -ENOMEM;
187 goto no_mem_sense;
188 }
189
190 retval = copy_from_user(data, data_user, sizeof(*data));
191 if (retval) {
192 retval = -EFAULT;
193 goto free_buffer;
194 }
195
196 if (data->signature != 0xCFDCACDF) {
197 retval = -EINVAL;
198 goto free_buffer;
199 }
200
201 retval = zfcp_cfdc_set_fsf(fsf_cfdc, data->command);
202
203 adapter = zfcp_cfdc_get_adapter(data->devno);
204 if (!adapter) {
205 retval = -ENXIO;
206 goto free_buffer;
207 }
208
209 retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
210 data_user->control_file);
211 if (retval)
212 goto adapter_put;
213 req = zfcp_fsf_control_file(adapter, fsf_cfdc);
214 if (IS_ERR(req)) {
215 retval = PTR_ERR(req);
216 goto free_sg;
217 }
218
219 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
220 retval = -ENXIO;
221 goto free_fsf;
222 }
223
224 zfcp_cfdc_req_to_sense(data, req);
225 retval = copy_to_user(data_user, data, sizeof(*data_user));
226 if (retval) {
227 retval = -EFAULT;
228 goto free_fsf;
229 }
230
231 if (data->command & ZFCP_CFDC_UPLOAD)
232 retval = zfcp_cfdc_copy_to_user(&data_user->control_file,
233 fsf_cfdc->sg);
234
235 free_fsf:
236 zfcp_fsf_req_free(req);
237 free_sg:
238 zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES);
239 adapter_put:
240 zfcp_adapter_put(adapter);
241 free_buffer:
242 kfree(data);
243 no_mem_sense:
244 kfree(fsf_cfdc);
245 return retval;
246}
247
248static const struct file_operations zfcp_cfdc_fops = {
249 .unlocked_ioctl = zfcp_cfdc_dev_ioctl,
250#ifdef CONFIG_COMPAT
251 .compat_ioctl = zfcp_cfdc_dev_ioctl
252#endif
253};
254
255struct miscdevice zfcp_cfdc_misc = {
256 .minor = MISC_DYNAMIC_MINOR,
257 .name = "zfcp_cfdc",
258 .fops = &zfcp_cfdc_fops,
259};
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index c8bad675dbd1..fca48b88fc53 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -1,22 +1,9 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * Debug traces for zfcp.
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22#include <linux/ctype.h> 9#include <linux/ctype.h>
@@ -29,8 +16,6 @@ module_param(dbfsize, uint, 0400);
29MODULE_PARM_DESC(dbfsize, 16MODULE_PARM_DESC(dbfsize,
30 "number of pages for each debug feature area (default 4)"); 17 "number of pages for each debug feature area (default 4)");
31 18
32#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
33
34static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len, 19static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len,
35 int level, char *from, int from_len) 20 int level, char *from, int from_len)
36{ 21{
@@ -186,8 +171,8 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
186 fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); 171 fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
187 response->fsf_req_status = fsf_req->status; 172 response->fsf_req_status = fsf_req->status;
188 response->sbal_first = fsf_req->sbal_first; 173 response->sbal_first = fsf_req->sbal_first;
189 response->sbal_curr = fsf_req->sbal_curr;
190 response->sbal_last = fsf_req->sbal_last; 174 response->sbal_last = fsf_req->sbal_last;
175 response->sbal_response = fsf_req->sbal_response;
191 response->pool = fsf_req->pool != NULL; 176 response->pool = fsf_req->pool != NULL;
192 response->erp_action = (unsigned long)fsf_req->erp_action; 177 response->erp_action = (unsigned long)fsf_req->erp_action;
193 178
@@ -268,7 +253,7 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
268 strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE); 253 strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
269 strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE); 254 strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
270 255
271 rec->u.status.failed = adapter->status_read_failed; 256 rec->u.status.failed = atomic_read(&adapter->stat_miss);
272 if (status_buffer != NULL) { 257 if (status_buffer != NULL) {
273 rec->u.status.status_type = status_buffer->status_type; 258 rec->u.status.status_type = status_buffer->status_type;
274 rec->u.status.status_subtype = status_buffer->status_subtype; 259 rec->u.status.status_subtype = status_buffer->status_subtype;
@@ -312,15 +297,13 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
312/** 297/**
313 * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure 298 * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure
314 * @adapter: adapter affected by this QDIO related event 299 * @adapter: adapter affected by this QDIO related event
315 * @status: as passed by qdio module
316 * @qdio_error: as passed by qdio module 300 * @qdio_error: as passed by qdio module
317 * @siga_error: as passed by qdio module
318 * @sbal_index: first buffer with error condition, as passed by qdio module 301 * @sbal_index: first buffer with error condition, as passed by qdio module
319 * @sbal_count: number of buffers affected, as passed by qdio module 302 * @sbal_count: number of buffers affected, as passed by qdio module
320 */ 303 */
321void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, 304void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
322 unsigned int qdio_error, unsigned int siga_error, 305 unsigned int qdio_error, int sbal_index,
323 int sbal_index, int sbal_count) 306 int sbal_count)
324{ 307{
325 struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf; 308 struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
326 unsigned long flags; 309 unsigned long flags;
@@ -328,9 +311,7 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
328 spin_lock_irqsave(&adapter->hba_dbf_lock, flags); 311 spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
329 memset(r, 0, sizeof(*r)); 312 memset(r, 0, sizeof(*r));
330 strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); 313 strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE);
331 r->u.qdio.status = status;
332 r->u.qdio.qdio_error = qdio_error; 314 r->u.qdio.qdio_error = qdio_error;
333 r->u.qdio.siga_error = siga_error;
334 r->u.qdio.sbal_index = sbal_index; 315 r->u.qdio.sbal_index = sbal_index;
335 r->u.qdio.sbal_count = sbal_count; 316 r->u.qdio.sbal_count = sbal_count;
336 debug_event(adapter->hba_dbf, 0, r, sizeof(*r)); 317 debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
@@ -355,8 +336,8 @@ static void zfcp_hba_dbf_view_response(char **p,
355 FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE); 336 FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE);
356 zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status); 337 zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status);
357 zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first); 338 zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first);
358 zfcp_dbf_out(p, "sbal_curr", "0x%02x", r->sbal_curr);
359 zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last); 339 zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last);
340 zfcp_dbf_out(p, "sbal_response", "0x%02x", r->sbal_response);
360 zfcp_dbf_out(p, "pool", "0x%02x", r->pool); 341 zfcp_dbf_out(p, "pool", "0x%02x", r->pool);
361 342
362 switch (r->fsf_command) { 343 switch (r->fsf_command) {
@@ -413,9 +394,7 @@ static void zfcp_hba_dbf_view_status(char **p,
413 394
414static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r) 395static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
415{ 396{
416 zfcp_dbf_out(p, "status", "0x%08x", r->status);
417 zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error); 397 zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error);
418 zfcp_dbf_out(p, "siga_error", "0x%08x", r->siga_error);
419 zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index); 398 zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index);
420 zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); 399 zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
421} 400}
@@ -515,13 +494,13 @@ static const char *zfcp_rec_dbf_ids[] = {
515 [52] = "port boxed close unit", 494 [52] = "port boxed close unit",
516 [53] = "port boxed fcp", 495 [53] = "port boxed fcp",
517 [54] = "unit boxed fcp", 496 [54] = "unit boxed fcp",
518 [55] = "port access denied ct", 497 [55] = "port access denied",
519 [56] = "port access denied els", 498 [56] = "",
520 [57] = "port access denied open port", 499 [57] = "",
521 [58] = "port access denied close physical", 500 [58] = "",
522 [59] = "unit access denied open unit", 501 [59] = "unit access denied",
523 [60] = "shared unit access denied open unit", 502 [60] = "shared unit access denied open unit",
524 [61] = "unit access denied fcp", 503 [61] = "",
525 [62] = "request timeout", 504 [62] = "request timeout",
526 [63] = "adisc link test reject or timeout", 505 [63] = "adisc link test reject or timeout",
527 [64] = "adisc link test d_id changed", 506 [64] = "adisc link test d_id changed",
@@ -546,8 +525,8 @@ static const char *zfcp_rec_dbf_ids[] = {
546 [80] = "exclusive read-only unit access unsupported", 525 [80] = "exclusive read-only unit access unsupported",
547 [81] = "shared read-write unit access unsupported", 526 [81] = "shared read-write unit access unsupported",
548 [82] = "incoming rscn", 527 [82] = "incoming rscn",
549 [83] = "incoming plogi", 528 [83] = "incoming wwpn",
550 [84] = "incoming logo", 529 [84] = "",
551 [85] = "online", 530 [85] = "online",
552 [86] = "offline", 531 [86] = "offline",
553 [87] = "ccw device gone", 532 [87] = "ccw device gone",
@@ -586,8 +565,8 @@ static const char *zfcp_rec_dbf_ids[] = {
586 [120] = "unknown fsf command", 565 [120] = "unknown fsf command",
587 [121] = "no recommendation for status qualifier", 566 [121] = "no recommendation for status qualifier",
588 [122] = "status read physical port closed in error", 567 [122] = "status read physical port closed in error",
589 [123] = "fc service class not supported ct", 568 [123] = "fc service class not supported",
590 [124] = "fc service class not supported els", 569 [124] = "",
591 [125] = "need newer zfcp", 570 [125] = "need newer zfcp",
592 [126] = "need newer microcode", 571 [126] = "need newer microcode",
593 [127] = "arbitrated loop not supported", 572 [127] = "arbitrated loop not supported",
@@ -595,7 +574,7 @@ static const char *zfcp_rec_dbf_ids[] = {
595 [129] = "qtcb size mismatch", 574 [129] = "qtcb size mismatch",
596 [130] = "unknown fsf status ecd", 575 [130] = "unknown fsf status ecd",
597 [131] = "fcp request too big", 576 [131] = "fcp request too big",
598 [132] = "fc service class not supported fcp", 577 [132] = "",
599 [133] = "data direction not valid fcp", 578 [133] = "data direction not valid fcp",
600 [134] = "command length not valid fcp", 579 [134] = "command length not valid fcp",
601 [135] = "status read act update", 580 [135] = "status read act update",
@@ -603,13 +582,18 @@ static const char *zfcp_rec_dbf_ids[] = {
603 [137] = "hbaapi port open", 582 [137] = "hbaapi port open",
604 [138] = "hbaapi unit open", 583 [138] = "hbaapi unit open",
605 [139] = "hbaapi unit shutdown", 584 [139] = "hbaapi unit shutdown",
606 [140] = "qdio error", 585 [140] = "qdio error outbound",
607 [141] = "scsi host reset", 586 [141] = "scsi host reset",
608 [142] = "dismissing fsf request for recovery action", 587 [142] = "dismissing fsf request for recovery action",
609 [143] = "recovery action timed out", 588 [143] = "recovery action timed out",
610 [144] = "recovery action gone", 589 [144] = "recovery action gone",
611 [145] = "recovery action being processed", 590 [145] = "recovery action being processed",
612 [146] = "recovery action ready for next step", 591 [146] = "recovery action ready for next step",
592 [147] = "qdio error inbound",
593 [148] = "nameserver needed for port scan",
594 [149] = "port scan",
595 [150] = "ptp attach",
596 [151] = "port validation failed",
613}; 597};
614 598
615static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view, 599static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view,
@@ -670,24 +654,20 @@ static struct debug_view zfcp_rec_dbf_view = {
670 * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation 654 * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation
671 * @id2: identifier for event 655 * @id2: identifier for event
672 * @adapter: adapter 656 * @adapter: adapter
673 * @lock: non-zero value indicates that erp_lock has not yet been acquired 657 * This function assumes that the caller is holding erp_lock.
674 */ 658 */
675void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter, int lock) 659void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter)
676{ 660{
677 struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; 661 struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
678 unsigned long flags = 0; 662 unsigned long flags = 0;
679 struct list_head *entry; 663 struct list_head *entry;
680 unsigned ready = 0, running = 0, total; 664 unsigned ready = 0, running = 0, total;
681 665
682 if (lock)
683 read_lock_irqsave(&adapter->erp_lock, flags);
684 list_for_each(entry, &adapter->erp_ready_head) 666 list_for_each(entry, &adapter->erp_ready_head)
685 ready++; 667 ready++;
686 list_for_each(entry, &adapter->erp_running_head) 668 list_for_each(entry, &adapter->erp_running_head)
687 running++; 669 running++;
688 total = adapter->erp_total_count; 670 total = adapter->erp_total_count;
689 if (lock)
690 read_unlock_irqrestore(&adapter->erp_lock, flags);
691 671
692 spin_lock_irqsave(&adapter->rec_dbf_lock, flags); 672 spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
693 memset(r, 0, sizeof(*r)); 673 memset(r, 0, sizeof(*r));
@@ -696,10 +676,25 @@ void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter, int lock)
696 r->u.thread.total = total; 676 r->u.thread.total = total;
697 r->u.thread.ready = ready; 677 r->u.thread.ready = ready;
698 r->u.thread.running = running; 678 r->u.thread.running = running;
699 debug_event(adapter->rec_dbf, 5, r, sizeof(*r)); 679 debug_event(adapter->rec_dbf, 6, r, sizeof(*r));
700 spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); 680 spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
701} 681}
702 682
683/**
684 * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation
685 * @id2: identifier for event
686 * @adapter: adapter
687 * This function assumes that the caller does not hold erp_lock.
688 */
689void zfcp_rec_dbf_event_thread_lock(u8 id2, struct zfcp_adapter *adapter)
690{
691 unsigned long flags;
692
693 read_lock_irqsave(&adapter->erp_lock, flags);
694 zfcp_rec_dbf_event_thread(id2, adapter);
695 read_unlock_irqrestore(&adapter->erp_lock, flags);
696}
697
703static void zfcp_rec_dbf_event_target(u8 id2, void *ref, 698static void zfcp_rec_dbf_event_target(u8 id2, void *ref,
704 struct zfcp_adapter *adapter, 699 struct zfcp_adapter *adapter,
705 atomic_t *status, atomic_t *erp_count, 700 atomic_t *status, atomic_t *erp_count,
@@ -823,7 +818,7 @@ void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action)
823 r->u.action.status = erp_action->status; 818 r->u.action.status = erp_action->status;
824 r->u.action.step = erp_action->step; 819 r->u.action.step = erp_action->step;
825 r->u.action.fsf_req = (unsigned long)erp_action->fsf_req; 820 r->u.action.fsf_req = (unsigned long)erp_action->fsf_req;
826 debug_event(adapter->rec_dbf, 4, r, sizeof(*r)); 821 debug_event(adapter->rec_dbf, 5, r, sizeof(*r));
827 spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); 822 spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
828} 823}
829 824
@@ -960,7 +955,7 @@ void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
960 955
961 zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id, 956 zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id,
962 fc_host_port_id(adapter->scsi_host), 957 fc_host_port_id(adapter->scsi_host),
963 *(u8 *)buf->payload, (void *)buf->payload, 958 buf->payload.data[0], (void *)buf->payload.data,
964 length); 959 length);
965} 960}
966 961
@@ -1064,8 +1059,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
1064 if (fsf_req != NULL) { 1059 if (fsf_req != NULL) {
1065 fcp_rsp = (struct fcp_rsp_iu *) 1060 fcp_rsp = (struct fcp_rsp_iu *)
1066 &(fsf_req->qtcb->bottom.io.fcp_rsp); 1061 &(fsf_req->qtcb->bottom.io.fcp_rsp);
1067 fcp_rsp_info = 1062 fcp_rsp_info = (unsigned char *) &fcp_rsp[1];
1068 zfcp_get_fcp_rsp_info_ptr(fcp_rsp);
1069 fcp_sns_info = 1063 fcp_sns_info =
1070 zfcp_get_fcp_sns_info_ptr(fcp_rsp); 1064 zfcp_get_fcp_sns_info_ptr(fcp_rsp);
1071 1065
@@ -1279,5 +1273,3 @@ void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
1279 adapter->hba_dbf = NULL; 1273 adapter->hba_dbf = NULL;
1280 adapter->rec_dbf = NULL; 1274 adapter->rec_dbf = NULL;
1281} 1275}
1282
1283#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 54c34e483457..0ddb18449d11 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -38,7 +38,7 @@ struct zfcp_rec_dbf_record_thread {
38 u32 total; 38 u32 total;
39 u32 ready; 39 u32 ready;
40 u32 running; 40 u32 running;
41} __attribute__ ((packed)); 41};
42 42
43struct zfcp_rec_dbf_record_target { 43struct zfcp_rec_dbf_record_target {
44 u64 ref; 44 u64 ref;
@@ -47,7 +47,7 @@ struct zfcp_rec_dbf_record_target {
47 u64 wwpn; 47 u64 wwpn;
48 u64 fcp_lun; 48 u64 fcp_lun;
49 u32 erp_count; 49 u32 erp_count;
50} __attribute__ ((packed)); 50};
51 51
52struct zfcp_rec_dbf_record_trigger { 52struct zfcp_rec_dbf_record_trigger {
53 u8 want; 53 u8 want;
@@ -59,14 +59,14 @@ struct zfcp_rec_dbf_record_trigger {
59 u64 action; 59 u64 action;
60 u64 wwpn; 60 u64 wwpn;
61 u64 fcp_lun; 61 u64 fcp_lun;
62} __attribute__ ((packed)); 62};
63 63
64struct zfcp_rec_dbf_record_action { 64struct zfcp_rec_dbf_record_action {
65 u32 status; 65 u32 status;
66 u32 step; 66 u32 step;
67 u64 action; 67 u64 action;
68 u64 fsf_req; 68 u64 fsf_req;
69} __attribute__ ((packed)); 69};
70 70
71struct zfcp_rec_dbf_record { 71struct zfcp_rec_dbf_record {
72 u8 id; 72 u8 id;
@@ -77,7 +77,7 @@ struct zfcp_rec_dbf_record {
77 struct zfcp_rec_dbf_record_target target; 77 struct zfcp_rec_dbf_record_target target;
78 struct zfcp_rec_dbf_record_trigger trigger; 78 struct zfcp_rec_dbf_record_trigger trigger;
79 } u; 79 } u;
80} __attribute__ ((packed)); 80};
81 81
82enum { 82enum {
83 ZFCP_REC_DBF_ID_ACTION, 83 ZFCP_REC_DBF_ID_ACTION,
@@ -97,8 +97,8 @@ struct zfcp_hba_dbf_record_response {
97 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; 97 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
98 u32 fsf_req_status; 98 u32 fsf_req_status;
99 u8 sbal_first; 99 u8 sbal_first;
100 u8 sbal_curr;
101 u8 sbal_last; 100 u8 sbal_last;
101 u8 sbal_response;
102 u8 pool; 102 u8 pool;
103 u64 erp_action; 103 u64 erp_action;
104 union { 104 union {
@@ -139,9 +139,7 @@ struct zfcp_hba_dbf_record_status {
139} __attribute__ ((packed)); 139} __attribute__ ((packed));
140 140
141struct zfcp_hba_dbf_record_qdio { 141struct zfcp_hba_dbf_record_qdio {
142 u32 status;
143 u32 qdio_error; 142 u32 qdio_error;
144 u32 siga_error;
145 u8 sbal_index; 143 u8 sbal_index;
146 u8 sbal_count; 144 u8 sbal_count;
147} __attribute__ ((packed)); 145} __attribute__ ((packed));
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index bda8c77b22da..67f45fc62f53 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -1,22 +1,9 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * Global definitions for the zfcp device driver.
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22#ifndef ZFCP_DEF_H 9#ifndef ZFCP_DEF_H
@@ -26,7 +13,6 @@
26 13
27#include <linux/init.h> 14#include <linux/init.h>
28#include <linux/moduleparam.h> 15#include <linux/moduleparam.h>
29#include <linux/miscdevice.h>
30#include <linux/major.h> 16#include <linux/major.h>
31#include <linux/blkdev.h> 17#include <linux/blkdev.h>
32#include <linux/delay.h> 18#include <linux/delay.h>
@@ -53,9 +39,6 @@
53 39
54/********************* GENERAL DEFINES *********************************/ 40/********************* GENERAL DEFINES *********************************/
55 41
56/* zfcp version number, it consists of major, minor, and patch-level number */
57#define ZFCP_VERSION "4.8.0"
58
59/** 42/**
60 * zfcp_sg_to_address - determine kernel address from struct scatterlist 43 * zfcp_sg_to_address - determine kernel address from struct scatterlist
61 * @list: struct scatterlist 44 * @list: struct scatterlist
@@ -93,11 +76,6 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
93#define ZFCP_DEVICE_MODEL 0x03 76#define ZFCP_DEVICE_MODEL 0x03
94#define ZFCP_DEVICE_MODEL_PRIV 0x04 77#define ZFCP_DEVICE_MODEL_PRIV 0x04
95 78
96/* allow as many chained SBALs as are supported by hardware */
97#define ZFCP_MAX_SBALS_PER_REQ FSF_MAX_SBALS_PER_REQ
98#define ZFCP_MAX_SBALS_PER_CT_REQ FSF_MAX_SBALS_PER_REQ
99#define ZFCP_MAX_SBALS_PER_ELS_REQ FSF_MAX_SBALS_PER_ELS_REQ
100
101/* DMQ bug workaround: don't use last SBALE */ 79/* DMQ bug workaround: don't use last SBALE */
102#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1) 80#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
103 81
@@ -106,42 +84,17 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
106 84
107/* max. number of (data buffer) SBALEs in largest SBAL chain */ 85/* max. number of (data buffer) SBALEs in largest SBAL chain */
108#define ZFCP_MAX_SBALES_PER_REQ \ 86#define ZFCP_MAX_SBALES_PER_REQ \
109 (ZFCP_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2) 87 (FSF_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
110 /* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */ 88 /* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
111 89
112#define ZFCP_MAX_SECTORS (ZFCP_MAX_SBALES_PER_REQ * 8) 90#define ZFCP_MAX_SECTORS (ZFCP_MAX_SBALES_PER_REQ * 8)
113 /* max. number of (data buffer) SBALEs in largest SBAL chain 91 /* max. number of (data buffer) SBALEs in largest SBAL chain
114 multiplied with number of sectors per 4k block */ 92 multiplied with number of sectors per 4k block */
115 93
116/* FIXME(tune): free space should be one max. SBAL chain plus what? */
117#define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \
118 - (ZFCP_MAX_SBALS_PER_REQ + 4))
119
120#define ZFCP_SBAL_TIMEOUT (5*HZ)
121
122#define ZFCP_TYPE2_RECOVERY_TIME 8 /* seconds */
123
124/* queue polling (values in microseconds) */
125#define ZFCP_MAX_INPUT_THRESHOLD 5000 /* FIXME: tune */
126#define ZFCP_MAX_OUTPUT_THRESHOLD 1000 /* FIXME: tune */
127#define ZFCP_MIN_INPUT_THRESHOLD 1 /* ignored by QDIO layer */
128#define ZFCP_MIN_OUTPUT_THRESHOLD 1 /* ignored by QDIO layer */
129
130#define QDIO_SCSI_QFMT 1 /* 1 for FSF */
131#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
132
133/********************* FSF SPECIFIC DEFINES *********************************/ 94/********************* FSF SPECIFIC DEFINES *********************************/
134 95
135#define ZFCP_ULP_INFO_VERSION 26
136#define ZFCP_QTCB_VERSION FSF_QTCB_CURRENT_VERSION
137/* ATTENTION: value must not be used by hardware */ 96/* ATTENTION: value must not be used by hardware */
138#define FSF_QTCB_UNSOLICITED_STATUS 0x6305 97#define FSF_QTCB_UNSOLICITED_STATUS 0x6305
139#define ZFCP_STATUS_READ_FAILED_THRESHOLD 3
140#define ZFCP_STATUS_READS_RECOM FSF_STATUS_READS_RECOM
141
142/* Do 1st retry in 1 second, then double the timeout for each following retry */
143#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 1
144#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
145 98
146/* timeout value for "default timer" for fsf requests */ 99/* timeout value for "default timer" for fsf requests */
147#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ) 100#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
@@ -153,17 +106,9 @@ typedef unsigned long long fcp_lun_t;
153/* data length field may be at variable position in FCP-2 FCP_CMND IU */ 106/* data length field may be at variable position in FCP-2 FCP_CMND IU */
154typedef unsigned int fcp_dl_t; 107typedef unsigned int fcp_dl_t;
155 108
156#define ZFCP_FC_SERVICE_CLASS_DEFAULT FSF_CLASS_3
157
158/* timeout for name-server lookup (in seconds) */ 109/* timeout for name-server lookup (in seconds) */
159#define ZFCP_NS_GID_PN_TIMEOUT 10 110#define ZFCP_NS_GID_PN_TIMEOUT 10
160 111
161/* largest SCSI command we can process */
162/* FCP-2 (FCP_CMND IU) allows up to (255-3+16) */
163#define ZFCP_MAX_SCSI_CMND_LENGTH 255
164/* maximum number of commands in LUN queue (tagged queueing) */
165#define ZFCP_CMND_PER_LUN 32
166
167/* task attribute values in FCP-2 FCP_CMND IU */ 112/* task attribute values in FCP-2 FCP_CMND IU */
168#define SIMPLE_Q 0 113#define SIMPLE_Q 0
169#define HEAD_OF_Q 1 114#define HEAD_OF_Q 1
@@ -224,9 +169,9 @@ struct fcp_rsp_iu {
224#define RSP_CODE_TASKMAN_FAILED 5 169#define RSP_CODE_TASKMAN_FAILED 5
225 170
226/* see fc-fs */ 171/* see fc-fs */
227#define LS_RSCN 0x61040000 172#define LS_RSCN 0x61
228#define LS_LOGO 0x05000000 173#define LS_LOGO 0x05
229#define LS_PLOGI 0x03000000 174#define LS_PLOGI 0x03
230 175
231struct fcp_rscn_head { 176struct fcp_rscn_head {
232 u8 command; 177 u8 command;
@@ -266,7 +211,6 @@ struct fcp_logo {
266 * FC-FS stuff 211 * FC-FS stuff
267 */ 212 */
268#define R_A_TOV 10 /* seconds */ 213#define R_A_TOV 10 /* seconds */
269#define ZFCP_ELS_TIMEOUT (2 * R_A_TOV)
270 214
271#define ZFCP_LS_RLS 0x0f 215#define ZFCP_LS_RLS 0x0f
272#define ZFCP_LS_ADISC 0x52 216#define ZFCP_LS_ADISC 0x52
@@ -311,7 +255,10 @@ struct zfcp_rc_entry {
311#define ZFCP_CT_DIRECTORY_SERVICE 0xFC 255#define ZFCP_CT_DIRECTORY_SERVICE 0xFC
312#define ZFCP_CT_NAME_SERVER 0x02 256#define ZFCP_CT_NAME_SERVER 0x02
313#define ZFCP_CT_SYNCHRONOUS 0x00 257#define ZFCP_CT_SYNCHRONOUS 0x00
258#define ZFCP_CT_SCSI_FCP 0x08
259#define ZFCP_CT_UNABLE_TO_PERFORM_CMD 0x09
314#define ZFCP_CT_GID_PN 0x0121 260#define ZFCP_CT_GID_PN 0x0121
261#define ZFCP_CT_GPN_FT 0x0172
315#define ZFCP_CT_MAX_SIZE 0x1020 262#define ZFCP_CT_MAX_SIZE 0x1020
316#define ZFCP_CT_ACCEPT 0x8002 263#define ZFCP_CT_ACCEPT 0x8002
317#define ZFCP_CT_REJECT 0x8001 264#define ZFCP_CT_REJECT 0x8001
@@ -321,107 +268,6 @@ struct zfcp_rc_entry {
321 */ 268 */
322#define ZFCP_CT_TIMEOUT (3 * R_A_TOV) 269#define ZFCP_CT_TIMEOUT (3 * R_A_TOV)
323 270
324/******************** LOGGING MACROS AND DEFINES *****************************/
325
326/*
327 * Logging may be applied on certain kinds of driver operations
328 * independently. Additionally, different log-levels are supported for
329 * each of these areas.
330 */
331
332#define ZFCP_NAME "zfcp"
333
334/* independent log areas */
335#define ZFCP_LOG_AREA_OTHER 0
336#define ZFCP_LOG_AREA_SCSI 1
337#define ZFCP_LOG_AREA_FSF 2
338#define ZFCP_LOG_AREA_CONFIG 3
339#define ZFCP_LOG_AREA_CIO 4
340#define ZFCP_LOG_AREA_QDIO 5
341#define ZFCP_LOG_AREA_ERP 6
342#define ZFCP_LOG_AREA_FC 7
343
344/* log level values*/
345#define ZFCP_LOG_LEVEL_NORMAL 0
346#define ZFCP_LOG_LEVEL_INFO 1
347#define ZFCP_LOG_LEVEL_DEBUG 2
348#define ZFCP_LOG_LEVEL_TRACE 3
349
350/*
351 * this allows removal of logging code by the preprocessor
352 * (the most detailed log level still to be compiled in is specified,
353 * higher log levels are removed)
354 */
355#define ZFCP_LOG_LEVEL_LIMIT ZFCP_LOG_LEVEL_TRACE
356
357/* get "loglevel" nibble assignment */
358#define ZFCP_GET_LOG_VALUE(zfcp_lognibble) \
359 ((atomic_read(&zfcp_data.loglevel) >> (zfcp_lognibble<<2)) & 0xF)
360
361/* set "loglevel" nibble */
362#define ZFCP_SET_LOG_NIBBLE(value, zfcp_lognibble) \
363 (value << (zfcp_lognibble << 2))
364
365/* all log-level defaults are combined to generate initial log-level */
366#define ZFCP_LOG_LEVEL_DEFAULTS \
367 (ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_OTHER) | \
368 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_SCSI) | \
369 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_FSF) | \
370 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_CONFIG) | \
371 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_CIO) | \
372 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_QDIO) | \
373 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_ERP) | \
374 ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_FC))
375
376/* check whether we have the right level for logging */
377#define ZFCP_LOG_CHECK(level) \
378 ((ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA)) >= level)
379
380/* logging routine for zfcp */
381#define _ZFCP_LOG(fmt, args...) \
382 printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __func__, \
383 __LINE__ , ##args)
384
385#define ZFCP_LOG(level, fmt, args...) \
386do { \
387 if (ZFCP_LOG_CHECK(level)) \
388 _ZFCP_LOG(fmt, ##args); \
389} while (0)
390
391#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL
392# define ZFCP_LOG_NORMAL(fmt, args...) do { } while (0)
393#else
394# define ZFCP_LOG_NORMAL(fmt, args...) \
395do { \
396 if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_NORMAL)) \
397 printk(KERN_ERR ZFCP_NAME": " fmt, ##args); \
398} while (0)
399#endif
400
401#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO
402# define ZFCP_LOG_INFO(fmt, args...) do { } while (0)
403#else
404# define ZFCP_LOG_INFO(fmt, args...) \
405do { \
406 if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_INFO)) \
407 printk(KERN_ERR ZFCP_NAME": " fmt, ##args); \
408} while (0)
409#endif
410
411#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG
412# define ZFCP_LOG_DEBUG(fmt, args...) do { } while (0)
413#else
414# define ZFCP_LOG_DEBUG(fmt, args...) \
415 ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args)
416#endif
417
418#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE
419# define ZFCP_LOG_TRACE(fmt, args...) do { } while (0)
420#else
421# define ZFCP_LOG_TRACE(fmt, args...) \
422 ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args)
423#endif
424
425/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/ 271/*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
426 272
427/* 273/*
@@ -441,6 +287,7 @@ do { \
441#define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000 287#define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000
442#define ZFCP_STATUS_COMMON_ACCESS_DENIED 0x00800000 288#define ZFCP_STATUS_COMMON_ACCESS_DENIED 0x00800000
443#define ZFCP_STATUS_COMMON_ACCESS_BOXED 0x00400000 289#define ZFCP_STATUS_COMMON_ACCESS_BOXED 0x00400000
290#define ZFCP_STATUS_COMMON_NOESC 0x00200000
444 291
445/* adapter status */ 292/* adapter status */
446#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 293#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
@@ -496,77 +343,6 @@ do { \
496#define ZFCP_STATUS_FSFREQ_RETRY 0x00000800 343#define ZFCP_STATUS_FSFREQ_RETRY 0x00000800
497#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000 344#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000
498 345
499/*********************** ERROR RECOVERY PROCEDURE DEFINES ********************/
500
501#define ZFCP_MAX_ERPS 3
502
503#define ZFCP_ERP_FSFREQ_TIMEOUT (30 * HZ)
504#define ZFCP_ERP_MEMWAIT_TIMEOUT HZ
505
506#define ZFCP_STATUS_ERP_TIMEDOUT 0x10000000
507#define ZFCP_STATUS_ERP_CLOSE_ONLY 0x01000000
508#define ZFCP_STATUS_ERP_DISMISSING 0x00100000
509#define ZFCP_STATUS_ERP_DISMISSED 0x00200000
510#define ZFCP_STATUS_ERP_LOWMEM 0x00400000
511
512#define ZFCP_ERP_STEP_UNINITIALIZED 0x00000000
513#define ZFCP_ERP_STEP_FSF_XCONFIG 0x00000001
514#define ZFCP_ERP_STEP_PHYS_PORT_CLOSING 0x00000010
515#define ZFCP_ERP_STEP_PORT_CLOSING 0x00000100
516#define ZFCP_ERP_STEP_NAMESERVER_OPEN 0x00000200
517#define ZFCP_ERP_STEP_NAMESERVER_LOOKUP 0x00000400
518#define ZFCP_ERP_STEP_PORT_OPENING 0x00000800
519#define ZFCP_ERP_STEP_UNIT_CLOSING 0x00001000
520#define ZFCP_ERP_STEP_UNIT_OPENING 0x00002000
521
522/* Ordered by escalation level (necessary for proper erp-code operation) */
523#define ZFCP_ERP_ACTION_REOPEN_ADAPTER 0x4
524#define ZFCP_ERP_ACTION_REOPEN_PORT_FORCED 0x3
525#define ZFCP_ERP_ACTION_REOPEN_PORT 0x2
526#define ZFCP_ERP_ACTION_REOPEN_UNIT 0x1
527
528#define ZFCP_ERP_ACTION_RUNNING 0x1
529#define ZFCP_ERP_ACTION_READY 0x2
530
531#define ZFCP_ERP_SUCCEEDED 0x0
532#define ZFCP_ERP_FAILED 0x1
533#define ZFCP_ERP_CONTINUES 0x2
534#define ZFCP_ERP_EXIT 0x3
535#define ZFCP_ERP_DISMISSED 0x4
536#define ZFCP_ERP_NOMEM 0x5
537
538
539/******************** CFDC SPECIFIC STUFF *****************************/
540
541/* Firewall data channel sense data record */
542struct zfcp_cfdc_sense_data {
543 u32 signature; /* Request signature */
544 u32 devno; /* FCP adapter device number */
545 u32 command; /* Command code */
546 u32 fsf_status; /* FSF request status and status qualifier */
547 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
548 u8 payloads[256]; /* Access conflicts list */
549 u8 control_file[0]; /* Access control table */
550};
551
552#define ZFCP_CFDC_SIGNATURE 0xCFDCACDF
553
554#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001
555#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101
556#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201
557#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401
558#define ZFCP_CFDC_CMND_UPLOAD 0x00010002
559
560#define ZFCP_CFDC_DOWNLOAD 0x00000001
561#define ZFCP_CFDC_UPLOAD 0x00000002
562#define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000
563
564#define ZFCP_CFDC_DEV_NAME "zfcp_cfdc"
565#define ZFCP_CFDC_DEV_MAJOR MISC_MAJOR
566#define ZFCP_CFDC_DEV_MINOR MISC_DYNAMIC_MINOR
567
568#define ZFCP_CFDC_MAX_CONTROL_FILE_SIZE 127 * 1024
569
570/************************* STRUCTURE DEFINITIONS *****************************/ 346/************************* STRUCTURE DEFINITIONS *****************************/
571 347
572struct zfcp_fsf_req; 348struct zfcp_fsf_req;
@@ -623,7 +399,6 @@ typedef void (*zfcp_send_ct_handler_t)(unsigned long);
623 * @resp_count: number of elements in response scatter-gather list 399 * @resp_count: number of elements in response scatter-gather list
624 * @handler: handler function (called for response to the request) 400 * @handler: handler function (called for response to the request)
625 * @handler_data: data passed to handler function 401 * @handler_data: data passed to handler function
626 * @pool: pointer to memory pool for ct request structure
627 * @timeout: FSF timeout for this request 402 * @timeout: FSF timeout for this request
628 * @completion: completion for synchronization purposes 403 * @completion: completion for synchronization purposes
629 * @status: used to pass error status to calling function 404 * @status: used to pass error status to calling function
@@ -636,7 +411,6 @@ struct zfcp_send_ct {
636 unsigned int resp_count; 411 unsigned int resp_count;
637 zfcp_send_ct_handler_t handler; 412 zfcp_send_ct_handler_t handler;
638 unsigned long handler_data; 413 unsigned long handler_data;
639 mempool_t *pool;
640 int timeout; 414 int timeout;
641 struct completion *completion; 415 struct completion *completion;
642 int status; 416 int status;
@@ -685,13 +459,13 @@ struct zfcp_send_els {
685}; 459};
686 460
687struct zfcp_qdio_queue { 461struct zfcp_qdio_queue {
688 struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */ 462 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
689 u8 free_index; /* index of next free bfr 463 u8 first; /* index of next free bfr
690 in queue (free_count>0) */ 464 in queue (free_count>0) */
691 atomic_t free_count; /* number of free buffers 465 atomic_t count; /* number of free buffers
692 in queue */ 466 in queue */
693 rwlock_t queue_lock; /* lock for operations on queue */ 467 spinlock_t lock; /* lock for operations on queue */
694 int distance_from_int; /* SBALs used since PCI indication 468 int pci_batch; /* SBALs since PCI indication
695 was last set */ 469 was last set */
696}; 470};
697 471
@@ -708,6 +482,24 @@ struct zfcp_erp_action {
708 struct timer_list timer; 482 struct timer_list timer;
709}; 483};
710 484
485struct fsf_latency_record {
486 u32 min;
487 u32 max;
488 u64 sum;
489};
490
491struct latency_cont {
492 struct fsf_latency_record channel;
493 struct fsf_latency_record fabric;
494 u64 counter;
495};
496
497struct zfcp_latencies {
498 struct latency_cont read;
499 struct latency_cont write;
500 struct latency_cont cmd;
501 spinlock_t lock;
502};
711 503
712struct zfcp_adapter { 504struct zfcp_adapter {
713 struct list_head list; /* list of adapters */ 505 struct list_head list; /* list of adapters */
@@ -723,24 +515,25 @@ struct zfcp_adapter {
723 u32 adapter_features; /* FCP channel features */ 515 u32 adapter_features; /* FCP channel features */
724 u32 connection_features; /* host connection features */ 516 u32 connection_features; /* host connection features */
725 u32 hardware_version; /* of FCP channel */ 517 u32 hardware_version; /* of FCP channel */
518 u16 timer_ticks; /* time int for a tick */
726 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */ 519 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
727 struct list_head port_list_head; /* remote port list */ 520 struct list_head port_list_head; /* remote port list */
728 struct list_head port_remove_lh; /* head of ports to be 521 struct list_head port_remove_lh; /* head of ports to be
729 removed */ 522 removed */
730 u32 ports; /* number of remote ports */ 523 u32 ports; /* number of remote ports */
731 atomic_t reqs_active; /* # active FSF reqs */
732 unsigned long req_no; /* unique FSF req number */ 524 unsigned long req_no; /* unique FSF req number */
733 struct list_head *req_list; /* list of pending reqs */ 525 struct list_head *req_list; /* list of pending reqs */
734 spinlock_t req_list_lock; /* request list lock */ 526 spinlock_t req_list_lock; /* request list lock */
735 struct zfcp_qdio_queue request_queue; /* request queue */ 527 struct zfcp_qdio_queue req_q; /* request queue */
736 u32 fsf_req_seq_no; /* FSF cmnd seq number */ 528 u32 fsf_req_seq_no; /* FSF cmnd seq number */
737 wait_queue_head_t request_wq; /* can be used to wait for 529 wait_queue_head_t request_wq; /* can be used to wait for
738 more avaliable SBALs */ 530 more avaliable SBALs */
739 struct zfcp_qdio_queue response_queue; /* response queue */ 531 struct zfcp_qdio_queue resp_q; /* response queue */
740 rwlock_t abort_lock; /* Protects against SCSI 532 rwlock_t abort_lock; /* Protects against SCSI
741 stack abort/command 533 stack abort/command
742 completion races */ 534 completion races */
743 u16 status_read_failed; /* # failed status reads */ 535 atomic_t stat_miss; /* # missing status reads*/
536 struct work_struct stat_work;
744 atomic_t status; /* status of this adapter */ 537 atomic_t status; /* status of this adapter */
745 struct list_head erp_ready_head; /* error recovery for this 538 struct list_head erp_ready_head; /* error recovery for this
746 adapter/devices */ 539 adapter/devices */
@@ -774,13 +567,9 @@ struct zfcp_adapter {
774 struct fc_host_statistics *fc_stats; 567 struct fc_host_statistics *fc_stats;
775 struct fsf_qtcb_bottom_port *stats_reset_data; 568 struct fsf_qtcb_bottom_port *stats_reset_data;
776 unsigned long stats_reset; 569 unsigned long stats_reset;
570 struct work_struct scan_work;
777}; 571};
778 572
779/*
780 * the struct device sysfs_device must be at the beginning of this structure.
781 * pointer to struct device is used to free port structure in release function
782 * of the device. don't change!
783 */
784struct zfcp_port { 573struct zfcp_port {
785 struct device sysfs_device; /* sysfs device */ 574 struct device sysfs_device; /* sysfs device */
786 struct fc_rport *rport; /* rport of fc transport class */ 575 struct fc_rport *rport; /* rport of fc transport class */
@@ -804,10 +593,6 @@ struct zfcp_port {
804 u32 supported_classes; 593 u32 supported_classes;
805}; 594};
806 595
807/* the struct device sysfs_device must be at the beginning of this structure.
808 * pointer to struct device is used to free unit structure in release function
809 * of the device. don't change!
810 */
811struct zfcp_unit { 596struct zfcp_unit {
812 struct device sysfs_device; /* sysfs device */ 597 struct device sysfs_device; /* sysfs device */
813 struct list_head list; /* list of logical units */ 598 struct list_head list; /* list of logical units */
@@ -822,6 +607,7 @@ struct zfcp_unit {
822 struct scsi_device *device; /* scsi device struct pointer */ 607 struct scsi_device *device; /* scsi device struct pointer */
823 struct zfcp_erp_action erp_action; /* pending error recovery */ 608 struct zfcp_erp_action erp_action; /* pending error recovery */
824 atomic_t erp_counter; 609 atomic_t erp_counter;
610 struct zfcp_latencies latencies;
825}; 611};
826 612
827/* FSF request */ 613/* FSF request */
@@ -831,19 +617,19 @@ struct zfcp_fsf_req {
831 struct zfcp_adapter *adapter; /* adapter request belongs to */ 617 struct zfcp_adapter *adapter; /* adapter request belongs to */
832 u8 sbal_number; /* nr of SBALs free for use */ 618 u8 sbal_number; /* nr of SBALs free for use */
833 u8 sbal_first; /* first SBAL for this request */ 619 u8 sbal_first; /* first SBAL for this request */
834 u8 sbal_last; /* last possible SBAL for 620 u8 sbal_last; /* last SBAL for this request */
621 u8 sbal_limit; /* last possible SBAL for
835 this reuest */ 622 this reuest */
836 u8 sbal_curr; /* current SBAL during creation
837 of request */
838 u8 sbale_curr; /* current SBALE during creation 623 u8 sbale_curr; /* current SBALE during creation
839 of request */ 624 of request */
625 u8 sbal_response; /* SBAL used in interrupt */
840 wait_queue_head_t completion_wq; /* can be used by a routine 626 wait_queue_head_t completion_wq; /* can be used by a routine
841 to wait for completion */ 627 to wait for completion */
842 volatile u32 status; /* status of this request */ 628 volatile u32 status; /* status of this request */
843 u32 fsf_command; /* FSF Command copy */ 629 u32 fsf_command; /* FSF Command copy */
844 struct fsf_qtcb *qtcb; /* address of associated QTCB */ 630 struct fsf_qtcb *qtcb; /* address of associated QTCB */
845 u32 seq_no; /* Sequence number of request */ 631 u32 seq_no; /* Sequence number of request */
846 unsigned long data; /* private data of request */ 632 void *data; /* private data of request */
847 struct timer_list timer; /* used for erp or scsi er */ 633 struct timer_list timer; /* used for erp or scsi er */
848 struct zfcp_erp_action *erp_action; /* used if this request is 634 struct zfcp_erp_action *erp_action; /* used if this request is
849 issued on behalf of erp */ 635 issued on behalf of erp */
@@ -851,10 +637,9 @@ struct zfcp_fsf_req {
851 from emergency pool */ 637 from emergency pool */
852 unsigned long long issued; /* request sent time (STCK) */ 638 unsigned long long issued; /* request sent time (STCK) */
853 struct zfcp_unit *unit; 639 struct zfcp_unit *unit;
640 void (*handler)(struct zfcp_fsf_req *);
854}; 641};
855 642
856typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*);
857
858/* driver data */ 643/* driver data */
859struct zfcp_data { 644struct zfcp_data {
860 struct scsi_host_template scsi_host_template; 645 struct scsi_host_template scsi_host_template;
@@ -873,29 +658,11 @@ struct zfcp_data {
873 char init_busid[BUS_ID_SIZE]; 658 char init_busid[BUS_ID_SIZE];
874 wwn_t init_wwpn; 659 wwn_t init_wwpn;
875 fcp_lun_t init_fcp_lun; 660 fcp_lun_t init_fcp_lun;
876 char *driver_version;
877 struct kmem_cache *fsf_req_qtcb_cache; 661 struct kmem_cache *fsf_req_qtcb_cache;
878 struct kmem_cache *sr_buffer_cache; 662 struct kmem_cache *sr_buffer_cache;
879 struct kmem_cache *gid_pn_cache; 663 struct kmem_cache *gid_pn_cache;
880}; 664};
881 665
882/**
883 * struct zfcp_sg_list - struct describing a scatter-gather list
884 * @sg: pointer to array of (struct scatterlist)
885 * @count: number of elements in scatter-gather list
886 */
887struct zfcp_sg_list {
888 struct scatterlist *sg;
889 unsigned int count;
890};
891
892/* number of elements for various memory pools */
893#define ZFCP_POOL_FSF_REQ_ERP_NR 1
894#define ZFCP_POOL_FSF_REQ_SCSI_NR 1
895#define ZFCP_POOL_FSF_REQ_ABORT_NR 1
896#define ZFCP_POOL_STATUS_READ_NR ZFCP_STATUS_READS_RECOM
897#define ZFCP_POOL_DATA_GID_PN_NR 1
898
899/* struct used by memory pools for fsf_requests */ 666/* struct used by memory pools for fsf_requests */
900struct zfcp_fsf_req_qtcb { 667struct zfcp_fsf_req_qtcb {
901 struct zfcp_fsf_req fsf_req; 668 struct zfcp_fsf_req fsf_req;
@@ -905,7 +672,6 @@ struct zfcp_fsf_req_qtcb {
905/********************** ZFCP SPECIFIC DEFINES ********************************/ 672/********************** ZFCP SPECIFIC DEFINES ********************************/
906 673
907#define ZFCP_REQ_AUTO_CLEANUP 0x00000002 674#define ZFCP_REQ_AUTO_CLEANUP 0x00000002
908#define ZFCP_WAIT_FOR_SBAL 0x00000004
909#define ZFCP_REQ_NO_QTCB 0x00000008 675#define ZFCP_REQ_NO_QTCB 0x00000008
910 676
911#define ZFCP_SET 0x00000100 677#define ZFCP_SET 0x00000100
@@ -916,12 +682,6 @@ struct zfcp_fsf_req_qtcb {
916 ((atomic_read(target) & mask) == mask) 682 ((atomic_read(target) & mask) == mask)
917#endif 683#endif
918 684
919extern void _zfcp_hex_dump(char *, int);
920#define ZFCP_HEX_DUMP(level, addr, count) \
921 if (ZFCP_LOG_CHECK(level)) { \
922 _zfcp_hex_dump(addr, count); \
923 }
924
925#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id) 685#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id)
926#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter)) 686#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter))
927#define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port)) 687#define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port))
@@ -934,15 +694,6 @@ static inline int zfcp_reqlist_hash(unsigned long req_id)
934 return req_id % REQUEST_LIST_SIZE; 694 return req_id % REQUEST_LIST_SIZE;
935} 695}
936 696
937static inline void zfcp_reqlist_add(struct zfcp_adapter *adapter,
938 struct zfcp_fsf_req *fsf_req)
939{
940 unsigned int idx;
941
942 idx = zfcp_reqlist_hash(fsf_req->req_id);
943 list_add_tail(&fsf_req->list, &adapter->req_list[idx]);
944}
945
946static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter, 697static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
947 struct zfcp_fsf_req *fsf_req) 698 struct zfcp_fsf_req *fsf_req)
948{ 699{
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 805484658dd9..643ac4bba5b5 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -1,641 +1,406 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * Error Recovery Procedures (ERP).
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
23
24#include "zfcp_ext.h" 9#include "zfcp_ext.h"
25 10
26static int zfcp_erp_adisc(struct zfcp_port *); 11#define ZFCP_MAX_ERPS 3
27static void zfcp_erp_adisc_handler(unsigned long);
28
29static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *, int, u8,
30 void *);
31static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *, int, u8,
32 void *);
33static int zfcp_erp_port_reopen_internal(struct zfcp_port *, int, u8, void *);
34static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *, int, u8, void *);
35
36static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *, int, u8,
37 void *);
38static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *, int, u8,
39 void *);
40
41static void zfcp_erp_adapter_block(struct zfcp_adapter *, int);
42static void zfcp_erp_adapter_unblock(struct zfcp_adapter *);
43static void zfcp_erp_port_block(struct zfcp_port *, int);
44static void zfcp_erp_port_unblock(struct zfcp_port *);
45static void zfcp_erp_unit_block(struct zfcp_unit *, int);
46static void zfcp_erp_unit_unblock(struct zfcp_unit *);
47
48static int zfcp_erp_thread(void *);
49
50static int zfcp_erp_strategy(struct zfcp_erp_action *);
51
52static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *);
53static int zfcp_erp_strategy_memwait(struct zfcp_erp_action *);
54static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *, int);
55static int zfcp_erp_strategy_check_unit(struct zfcp_unit *, int);
56static int zfcp_erp_strategy_check_port(struct zfcp_port *, int);
57static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *, int);
58static int zfcp_erp_strategy_statechange(int, u32, struct zfcp_adapter *,
59 struct zfcp_port *,
60 struct zfcp_unit *, int);
61static int zfcp_erp_strategy_statechange_detected(atomic_t *, u32);
62static int zfcp_erp_strategy_followup_actions(int, struct zfcp_adapter *,
63 struct zfcp_port *,
64 struct zfcp_unit *, int);
65static int zfcp_erp_strategy_check_queues(struct zfcp_adapter *);
66static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
67
68static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
69static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
70static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
71static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
72static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
73static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
74static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *);
75static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *);
76static int zfcp_erp_adapter_strategy_open_fsf_statusread(
77 struct zfcp_erp_action *);
78
79static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *);
80static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *);
81
82static int zfcp_erp_port_strategy(struct zfcp_erp_action *);
83static int zfcp_erp_port_strategy_clearstati(struct zfcp_port *);
84static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *);
85static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *);
86static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *);
87static int zfcp_erp_port_strategy_open_nameserver_wakeup(
88 struct zfcp_erp_action *);
89static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *);
90static int zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *);
91static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *);
92
93static int zfcp_erp_unit_strategy(struct zfcp_erp_action *);
94static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
95static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
96static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
97
98static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
99static void zfcp_erp_action_dismiss_port(struct zfcp_port *);
100static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
101static void zfcp_erp_action_dismiss(struct zfcp_erp_action *);
102
103static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *,
104 struct zfcp_port *, struct zfcp_unit *,
105 u8 id, void *ref);
106static int zfcp_erp_action_dequeue(struct zfcp_erp_action *);
107static void zfcp_erp_action_cleanup(int, struct zfcp_adapter *,
108 struct zfcp_port *, struct zfcp_unit *,
109 int);
110
111static void zfcp_erp_action_ready(struct zfcp_erp_action *);
112static int zfcp_erp_action_exists(struct zfcp_erp_action *);
113
114static void zfcp_erp_action_to_ready(struct zfcp_erp_action *);
115static void zfcp_erp_action_to_running(struct zfcp_erp_action *);
116
117static void zfcp_erp_memwait_handler(unsigned long);
118 12
119/** 13enum zfcp_erp_act_flags {
120 * zfcp_close_qdio - close qdio queues for an adapter 14 ZFCP_STATUS_ERP_TIMEDOUT = 0x10000000,
121 */ 15 ZFCP_STATUS_ERP_CLOSE_ONLY = 0x01000000,
122static void zfcp_close_qdio(struct zfcp_adapter *adapter) 16 ZFCP_STATUS_ERP_DISMISSING = 0x00100000,
123{ 17 ZFCP_STATUS_ERP_DISMISSED = 0x00200000,
124 struct zfcp_qdio_queue *req_queue; 18 ZFCP_STATUS_ERP_LOWMEM = 0x00400000,
125 int first, count; 19};
126 20
127 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) 21enum zfcp_erp_steps {
128 return; 22 ZFCP_ERP_STEP_UNINITIALIZED = 0x0000,
23 ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001,
24 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
25 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
26 ZFCP_ERP_STEP_NAMESERVER_OPEN = 0x0200,
27 ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400,
28 ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
29 ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000,
30 ZFCP_ERP_STEP_UNIT_OPENING = 0x2000,
31};
129 32
130 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ 33enum zfcp_erp_act_type {
131 req_queue = &adapter->request_queue; 34 ZFCP_ERP_ACTION_REOPEN_UNIT = 1,
132 write_lock_irq(&req_queue->queue_lock); 35 ZFCP_ERP_ACTION_REOPEN_PORT = 2,
133 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 36 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
134 write_unlock_irq(&req_queue->queue_lock); 37 ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
135 38};
136 while (qdio_shutdown(adapter->ccw_device, 39
137 QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) 40enum zfcp_erp_act_state {
138 ssleep(1); 41 ZFCP_ERP_ACTION_RUNNING = 1,
139 42 ZFCP_ERP_ACTION_READY = 2,
140 /* cleanup used outbound sbals */ 43};
141 count = atomic_read(&req_queue->free_count); 44
142 if (count < QDIO_MAX_BUFFERS_PER_Q) { 45enum zfcp_erp_act_result {
143 first = (req_queue->free_index+count) % QDIO_MAX_BUFFERS_PER_Q; 46 ZFCP_ERP_SUCCEEDED = 0,
144 count = QDIO_MAX_BUFFERS_PER_Q - count; 47 ZFCP_ERP_FAILED = 1,
145 zfcp_qdio_zero_sbals(req_queue->buffer, first, count); 48 ZFCP_ERP_CONTINUES = 2,
146 } 49 ZFCP_ERP_EXIT = 3,
147 req_queue->free_index = 0; 50 ZFCP_ERP_DISMISSED = 4,
148 atomic_set(&req_queue->free_count, 0); 51 ZFCP_ERP_NOMEM = 5,
149 req_queue->distance_from_int = 0; 52};
150 adapter->response_queue.free_index = 0; 53
151 atomic_set(&adapter->response_queue.free_count, 0); 54static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
55{
56 zfcp_erp_modify_adapter_status(adapter, 15, NULL,
57 ZFCP_STATUS_COMMON_UNBLOCKED | mask,
58 ZFCP_CLEAR);
152} 59}
153 60
154/** 61static int zfcp_erp_action_exists(struct zfcp_erp_action *act)
155 * zfcp_close_fsf - stop FSF operations for an adapter
156 *
157 * Dismiss and cleanup all pending fsf_reqs (this wakes up all initiators of
158 * requests waiting for completion; especially this returns SCSI commands
159 * with error state).
160 */
161static void zfcp_close_fsf(struct zfcp_adapter *adapter)
162{ 62{
163 /* close queues to ensure that buffers are not accessed by adapter */ 63 struct zfcp_erp_action *curr_act;
164 zfcp_close_qdio(adapter); 64
165 zfcp_fsf_req_dismiss_all(adapter); 65 list_for_each_entry(curr_act, &act->adapter->erp_running_head, list)
166 /* reset FSF request sequence number */ 66 if (act == curr_act)
167 adapter->fsf_req_seq_no = 0; 67 return ZFCP_ERP_ACTION_RUNNING;
168 /* all ports and units are closed */ 68 return 0;
169 zfcp_erp_modify_adapter_status(adapter, 24, NULL,
170 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
171} 69}
172 70
173/** 71static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
174 * zfcp_fsf_request_timeout_handler - called if a request timed out
175 * @data: pointer to adapter for handler function
176 *
177 * This function needs to be called if requests (ELS, Generic Service,
178 * or SCSI commands) exceed a certain time limit. The assumption is
179 * that after the time limit the adapter get stuck. So we trigger a reopen of
180 * the adapter.
181 */
182static void zfcp_fsf_request_timeout_handler(unsigned long data)
183{ 72{
184 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; 73 struct zfcp_adapter *adapter = act->adapter;
185 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62, 74
186 NULL); 75 list_move(&act->list, &act->adapter->erp_ready_head);
76 zfcp_rec_dbf_event_action(146, act);
77 up(&adapter->erp_ready_sem);
78 zfcp_rec_dbf_event_thread(2, adapter);
187} 79}
188 80
189void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout) 81static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
190{ 82{
191 fsf_req->timer.function = zfcp_fsf_request_timeout_handler; 83 act->status |= ZFCP_STATUS_ERP_DISMISSED;
192 fsf_req->timer.data = (unsigned long) fsf_req->adapter; 84 if (zfcp_erp_action_exists(act) == ZFCP_ERP_ACTION_RUNNING)
193 fsf_req->timer.expires = jiffies + timeout; 85 zfcp_erp_action_ready(act);
194 add_timer(&fsf_req->timer);
195} 86}
196 87
197/* 88static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
198 * function:
199 *
200 * purpose: called if an adapter failed,
201 * initiates adapter recovery which is done
202 * asynchronously
203 *
204 * returns: 0 - initiated action successfully
205 * <0 - failed to initiate action
206 */
207static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter,
208 int clear_mask, u8 id, void *ref)
209{ 89{
210 int retval; 90 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
91 zfcp_erp_action_dismiss(&unit->erp_action);
92}
211 93
212 ZFCP_LOG_DEBUG("reopen adapter %s\n", 94static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
213 zfcp_get_busid_by_adapter(adapter)); 95{
96 struct zfcp_unit *unit;
214 97
215 zfcp_erp_adapter_block(adapter, clear_mask); 98 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
99 zfcp_erp_action_dismiss(&port->erp_action);
100 else
101 list_for_each_entry(unit, &port->unit_list_head, list)
102 zfcp_erp_action_dismiss_unit(unit);
103}
216 104
217 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) { 105static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
218 ZFCP_LOG_DEBUG("skipped reopen of failed adapter %s\n", 106{
219 zfcp_get_busid_by_adapter(adapter)); 107 struct zfcp_port *port;
220 /* ensure propagation of failed status to new devices */
221 zfcp_erp_adapter_failed(adapter, 13, NULL);
222 retval = -EIO;
223 goto out;
224 }
225 retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
226 adapter, NULL, NULL, id, ref);
227 108
228 out: 109 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
229 return retval; 110 zfcp_erp_action_dismiss(&adapter->erp_action);
111 else
112 list_for_each_entry(port, &adapter->port_list_head, list)
113 zfcp_erp_action_dismiss_port(port);
230} 114}
231 115
232/* 116static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
233 * function: 117 struct zfcp_port *port,
234 * 118 struct zfcp_unit *unit)
235 * purpose: Wrappper for zfcp_erp_adapter_reopen_internal
236 * used to ensure the correct locking
237 *
238 * returns: 0 - initiated action successfully
239 * <0 - failed to initiate action
240 */
241int zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear_mask,
242 u8 id, void *ref)
243{ 119{
244 int retval; 120 int need = want;
245 unsigned long flags; 121 int u_status, p_status, a_status;
246 122
247 read_lock_irqsave(&zfcp_data.config_lock, flags); 123 switch (want) {
248 write_lock(&adapter->erp_lock); 124 case ZFCP_ERP_ACTION_REOPEN_UNIT:
249 retval = zfcp_erp_adapter_reopen_internal(adapter, clear_mask, id, ref); 125 u_status = atomic_read(&unit->status);
250 write_unlock(&adapter->erp_lock); 126 if (u_status & ZFCP_STATUS_COMMON_ERP_INUSE)
251 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 127 return 0;
128 p_status = atomic_read(&port->status);
129 if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
130 p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
131 return 0;
132 if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
133 need = ZFCP_ERP_ACTION_REOPEN_PORT;
134 /* fall through */
135 case ZFCP_ERP_ACTION_REOPEN_PORT:
136 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
137 p_status = atomic_read(&port->status);
138 if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
139 return 0;
140 a_status = atomic_read(&adapter->status);
141 if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
142 a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
143 return 0;
144 if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED))
145 need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
146 /* fall through */
147 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
148 a_status = atomic_read(&adapter->status);
149 if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
150 return 0;
151 }
252 152
253 return retval; 153 return need;
254} 154}
255 155
256int zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear_mask, 156static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
257 u8 id, void *ref) 157 struct zfcp_adapter *adapter,
158 struct zfcp_port *port,
159 struct zfcp_unit *unit)
258{ 160{
259 int retval; 161 struct zfcp_erp_action *erp_action;
162 u32 status = 0;
260 163
261 retval = zfcp_erp_adapter_reopen(adapter, 164 switch (need) {
262 ZFCP_STATUS_COMMON_RUNNING | 165 case ZFCP_ERP_ACTION_REOPEN_UNIT:
263 ZFCP_STATUS_COMMON_ERP_FAILED | 166 zfcp_unit_get(unit);
264 clear_mask, id, ref); 167 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
168 erp_action = &unit->erp_action;
169 if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING))
170 status = ZFCP_STATUS_ERP_CLOSE_ONLY;
171 break;
265 172
266 return retval; 173 case ZFCP_ERP_ACTION_REOPEN_PORT:
267} 174 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
175 zfcp_port_get(port);
176 zfcp_erp_action_dismiss_port(port);
177 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
178 erp_action = &port->erp_action;
179 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
180 status = ZFCP_STATUS_ERP_CLOSE_ONLY;
181 break;
268 182
269int zfcp_erp_port_shutdown(struct zfcp_port *port, int clear_mask, u8 id, 183 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
270 void *ref) 184 zfcp_adapter_get(adapter);
271{ 185 zfcp_erp_action_dismiss_adapter(adapter);
272 int retval; 186 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
187 erp_action = &adapter->erp_action;
188 if (!(atomic_read(&adapter->status) &
189 ZFCP_STATUS_COMMON_RUNNING))
190 status = ZFCP_STATUS_ERP_CLOSE_ONLY;
191 break;
273 192
274 retval = zfcp_erp_port_reopen(port, 193 default:
275 ZFCP_STATUS_COMMON_RUNNING | 194 return NULL;
276 ZFCP_STATUS_COMMON_ERP_FAILED | 195 }
277 clear_mask, id, ref);
278 196
279 return retval; 197 memset(erp_action, 0, sizeof(struct zfcp_erp_action));
198 erp_action->adapter = adapter;
199 erp_action->port = port;
200 erp_action->unit = unit;
201 erp_action->action = need;
202 erp_action->status = status;
203
204 return erp_action;
280} 205}
281 206
282int zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask, u8 id, 207static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
283 void *ref) 208 struct zfcp_port *port,
209 struct zfcp_unit *unit, u8 id, void *ref)
284{ 210{
285 int retval; 211 int retval = 1, need;
212 struct zfcp_erp_action *act = NULL;
213
214 if (!(atomic_read(&adapter->status) &
215 ZFCP_STATUS_ADAPTER_ERP_THREAD_UP))
216 return -EIO;
286 217
287 retval = zfcp_erp_unit_reopen(unit, 218 need = zfcp_erp_required_act(want, adapter, port, unit);
288 ZFCP_STATUS_COMMON_RUNNING | 219 if (!need)
289 ZFCP_STATUS_COMMON_ERP_FAILED | 220 goto out;
290 clear_mask, id, ref);
291 221
222 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
223 act = zfcp_erp_setup_act(need, adapter, port, unit);
224 if (!act)
225 goto out;
226 ++adapter->erp_total_count;
227 list_add_tail(&act->list, &adapter->erp_ready_head);
228 up(&adapter->erp_ready_sem);
229 zfcp_rec_dbf_event_thread(1, adapter);
230 retval = 0;
231 out:
232 zfcp_rec_dbf_event_trigger(id, ref, want, need, act,
233 adapter, port, unit);
292 return retval; 234 return retval;
293} 235}
294 236
295 237static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
296/** 238 int clear_mask, u8 id, void *ref)
297 * zfcp_erp_adisc - send ADISC ELS command
298 * @port: port structure
299 */
300static int
301zfcp_erp_adisc(struct zfcp_port *port)
302{ 239{
303 struct zfcp_adapter *adapter = port->adapter; 240 zfcp_erp_adapter_block(adapter, clear_mask);
304 struct zfcp_send_els *send_els;
305 struct zfcp_ls_adisc *adisc;
306 void *address = NULL;
307 int retval = 0;
308
309 send_els = kzalloc(sizeof(struct zfcp_send_els), GFP_ATOMIC);
310 if (send_els == NULL)
311 goto nomem;
312
313 send_els->req = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC);
314 if (send_els->req == NULL)
315 goto nomem;
316 sg_init_table(send_els->req, 1);
317
318 send_els->resp = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC);
319 if (send_els->resp == NULL)
320 goto nomem;
321 sg_init_table(send_els->resp, 1);
322
323 address = (void *) get_zeroed_page(GFP_ATOMIC);
324 if (address == NULL)
325 goto nomem;
326
327 zfcp_address_to_sg(address, send_els->req, sizeof(struct zfcp_ls_adisc));
328 address += PAGE_SIZE >> 1;
329 zfcp_address_to_sg(address, send_els->resp, sizeof(struct zfcp_ls_adisc_acc));
330 send_els->req_count = send_els->resp_count = 1;
331
332 send_els->adapter = adapter;
333 send_els->port = port;
334 send_els->d_id = port->d_id;
335 send_els->handler = zfcp_erp_adisc_handler;
336 send_els->handler_data = (unsigned long) send_els;
337
338 adisc = zfcp_sg_to_address(send_els->req);
339 send_els->ls_code = adisc->code = ZFCP_LS_ADISC;
340
341 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
342 without FC-AL-2 capability, so we don't set it */
343 adisc->wwpn = fc_host_port_name(adapter->scsi_host);
344 adisc->wwnn = fc_host_node_name(adapter->scsi_host);
345 adisc->nport_id = fc_host_port_id(adapter->scsi_host);
346 ZFCP_LOG_INFO("ADISC request from s_id 0x%06x to d_id 0x%06x "
347 "(wwpn=0x%016Lx, wwnn=0x%016Lx, "
348 "hard_nport_id=0x%06x, nport_id=0x%06x)\n",
349 adisc->nport_id, send_els->d_id, (wwn_t) adisc->wwpn,
350 (wwn_t) adisc->wwnn, adisc->hard_nport_id,
351 adisc->nport_id);
352
353 retval = zfcp_fsf_send_els(send_els);
354 if (retval != 0) {
355 ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port "
356 "0x%06x on adapter %s\n", send_els->d_id,
357 zfcp_get_busid_by_adapter(adapter));
358 goto freemem;
359 }
360 241
361 goto out; 242 /* ensure propagation of failed status to new devices */
362 243 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
363 nomem: 244 zfcp_erp_adapter_failed(adapter, 13, NULL);
364 retval = -ENOMEM; 245 return -EIO;
365 freemem:
366 if (address != NULL)
367 __free_pages(sg_page(send_els->req), 0);
368 if (send_els != NULL) {
369 kfree(send_els->req);
370 kfree(send_els->resp);
371 kfree(send_els);
372 } 246 }
373 out: 247 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
374 return retval; 248 adapter, NULL, NULL, id, ref);
375} 249}
376 250
377
378/** 251/**
379 * zfcp_erp_adisc_handler - handler for ADISC ELS command 252 * zfcp_erp_adapter_reopen - Reopen adapter.
380 * @data: pointer to struct zfcp_send_els 253 * @adapter: Adapter to reopen.
381 * 254 * @clear: Status flags to clear.
382 * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered. 255 * @id: Id for debug trace event.
256 * @ref: Reference for debug trace event.
383 */ 257 */
384static void 258void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
385zfcp_erp_adisc_handler(unsigned long data) 259 u8 id, void *ref)
386{ 260{
387 struct zfcp_send_els *send_els; 261 unsigned long flags;
388 struct zfcp_port *port;
389 struct zfcp_adapter *adapter;
390 u32 d_id;
391 struct zfcp_ls_adisc_acc *adisc;
392
393 send_els = (struct zfcp_send_els *) data;
394 adapter = send_els->adapter;
395 port = send_els->port;
396 d_id = send_els->d_id;
397
398 /* request rejected or timed out */
399 if (send_els->status != 0) {
400 ZFCP_LOG_NORMAL("ELS request rejected/timed out, "
401 "force physical port reopen "
402 "(adapter %s, port d_id=0x%06x)\n",
403 zfcp_get_busid_by_adapter(adapter), d_id);
404 if (zfcp_erp_port_forced_reopen(port, 0, 63, NULL))
405 ZFCP_LOG_NORMAL("failed reopen of port "
406 "(adapter %s, wwpn=0x%016Lx)\n",
407 zfcp_get_busid_by_port(port),
408 port->wwpn);
409 goto out;
410 }
411
412 adisc = zfcp_sg_to_address(send_els->resp);
413
414 ZFCP_LOG_INFO("ADISC response from d_id 0x%06x to s_id "
415 "0x%06x (wwpn=0x%016Lx, wwnn=0x%016Lx, "
416 "hard_nport_id=0x%06x, nport_id=0x%06x)\n",
417 d_id, fc_host_port_id(adapter->scsi_host),
418 (wwn_t) adisc->wwpn, (wwn_t) adisc->wwnn,
419 adisc->hard_nport_id, adisc->nport_id);
420
421 /* set wwnn for port */
422 if (port->wwnn == 0)
423 port->wwnn = adisc->wwnn;
424
425 if (port->wwpn != adisc->wwpn) {
426 ZFCP_LOG_NORMAL("d_id assignment changed, reopening "
427 "port (adapter %s, wwpn=0x%016Lx, "
428 "adisc_resp_wwpn=0x%016Lx)\n",
429 zfcp_get_busid_by_port(port),
430 port->wwpn, (wwn_t) adisc->wwpn);
431 if (zfcp_erp_port_reopen(port, 0, 64, NULL))
432 ZFCP_LOG_NORMAL("failed reopen of port "
433 "(adapter %s, wwpn=0x%016Lx)\n",
434 zfcp_get_busid_by_port(port),
435 port->wwpn);
436 }
437 262
438 out: 263 read_lock_irqsave(&zfcp_data.config_lock, flags);
439 zfcp_port_put(port); 264 write_lock(&adapter->erp_lock);
440 __free_pages(sg_page(send_els->req), 0); 265 _zfcp_erp_adapter_reopen(adapter, clear, id, ref);
441 kfree(send_els->req); 266 write_unlock(&adapter->erp_lock);
442 kfree(send_els->resp); 267 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
443 kfree(send_els);
444} 268}
445 269
446
447/** 270/**
448 * zfcp_test_link - lightweight link test procedure 271 * zfcp_erp_adapter_shutdown - Shutdown adapter.
449 * @port: port to be tested 272 * @adapter: Adapter to shut down.
450 * 273 * @clear: Status flags to clear.
451 * Test status of a link to a remote port using the ELS command ADISC. 274 * @id: Id for debug trace event.
275 * @ref: Reference for debug trace event.
452 */ 276 */
453int 277void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
454zfcp_test_link(struct zfcp_port *port) 278 u8 id, void *ref)
455{ 279{
456 int retval; 280 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
457 281 zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref);
458 zfcp_port_get(port);
459 retval = zfcp_erp_adisc(port);
460 if (retval != 0 && retval != -EBUSY) {
461 zfcp_port_put(port);
462 ZFCP_LOG_NORMAL("reopen needed for port 0x%016Lx "
463 "on adapter %s\n ", port->wwpn,
464 zfcp_get_busid_by_port(port));
465 retval = zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
466 if (retval != 0) {
467 ZFCP_LOG_NORMAL("reopen of remote port 0x%016Lx "
468 "on adapter %s failed\n", port->wwpn,
469 zfcp_get_busid_by_port(port));
470 retval = -EPERM;
471 }
472 }
473
474 return retval;
475} 282}
476 283
477 284/**
478/* 285 * zfcp_erp_port_shutdown - Shutdown port
479 * function: 286 * @port: Port to shut down.
480 * 287 * @clear: Status flags to clear.
481 * purpose: called if a port failed to be opened normally 288 * @id: Id for debug trace event.
482 * initiates Forced Reopen recovery which is done 289 * @ref: Reference for debug trace event.
483 * asynchronously
484 *
485 * returns: 0 - initiated action successfully
486 * <0 - failed to initiate action
487 */ 290 */
488static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port, 291void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, u8 id, void *ref)
489 int clear_mask, u8 id,
490 void *ref)
491{ 292{
492 int retval; 293 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
294 zfcp_erp_port_reopen(port, clear | flags, id, ref);
295}
493 296
494 ZFCP_LOG_DEBUG("forced reopen of port 0x%016Lx on adapter %s\n", 297/**
495 port->wwpn, zfcp_get_busid_by_port(port)); 298 * zfcp_erp_unit_shutdown - Shutdown unit
299 * @unit: Unit to shut down.
300 * @clear: Status flags to clear.
301 * @id: Id for debug trace event.
302 * @ref: Reference for debug trace event.
303 */
304void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, u8 id, void *ref)
305{
306 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
307 zfcp_erp_unit_reopen(unit, clear | flags, id, ref);
308}
496 309
497 zfcp_erp_port_block(port, clear_mask); 310static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
311{
312 zfcp_erp_modify_port_status(port, 17, NULL,
313 ZFCP_STATUS_COMMON_UNBLOCKED | clear,
314 ZFCP_CLEAR);
315}
498 316
499 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) { 317static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
500 ZFCP_LOG_DEBUG("skipped forced reopen of failed port 0x%016Lx " 318 int clear, u8 id, void *ref)
501 "on adapter %s\n", port->wwpn, 319{
502 zfcp_get_busid_by_port(port)); 320 zfcp_erp_port_block(port, clear);
503 retval = -EIO;
504 goto out;
505 }
506 321
507 retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, 322 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
508 port->adapter, port, NULL, id, ref); 323 return;
509 324
510 out: 325 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
511 return retval; 326 port->adapter, port, NULL, id, ref);
512} 327}
513 328
514/* 329/**
515 * function: 330 * zfcp_erp_port_forced_reopen - Forced close of port and open again
516 * 331 * @port: Port to force close and to reopen.
517 * purpose: Wrappper for zfcp_erp_port_forced_reopen_internal 332 * @id: Id for debug trace event.
518 * used to ensure the correct locking 333 * @ref: Reference for debug trace event.
519 *
520 * returns: 0 - initiated action successfully
521 * <0 - failed to initiate action
522 */ 334 */
523int zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask, u8 id, 335void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, u8 id,
524 void *ref) 336 void *ref)
525{ 337{
526 int retval;
527 unsigned long flags; 338 unsigned long flags;
528 struct zfcp_adapter *adapter; 339 struct zfcp_adapter *adapter = port->adapter;
529 340
530 adapter = port->adapter;
531 read_lock_irqsave(&zfcp_data.config_lock, flags); 341 read_lock_irqsave(&zfcp_data.config_lock, flags);
532 write_lock(&adapter->erp_lock); 342 write_lock(&adapter->erp_lock);
533 retval = zfcp_erp_port_forced_reopen_internal(port, clear_mask, id, 343 _zfcp_erp_port_forced_reopen(port, clear, id, ref);
534 ref);
535 write_unlock(&adapter->erp_lock); 344 write_unlock(&adapter->erp_lock);
536 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 345 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
537
538 return retval;
539} 346}
540 347
541/* 348static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id,
542 * function: 349 void *ref)
543 *
544 * purpose: called if a port is to be opened
545 * initiates Reopen recovery which is done
546 * asynchronously
547 *
548 * returns: 0 - initiated action successfully
549 * <0 - failed to initiate action
550 */
551static int zfcp_erp_port_reopen_internal(struct zfcp_port *port, int clear_mask,
552 u8 id, void *ref)
553{ 350{
554 int retval; 351 zfcp_erp_port_block(port, clear);
555
556 ZFCP_LOG_DEBUG("reopen of port 0x%016Lx on adapter %s\n",
557 port->wwpn, zfcp_get_busid_by_port(port));
558 352
559 zfcp_erp_port_block(port, clear_mask); 353 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
560
561 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
562 ZFCP_LOG_DEBUG("skipped reopen of failed port 0x%016Lx "
563 "on adapter %s\n", port->wwpn,
564 zfcp_get_busid_by_port(port));
565 /* ensure propagation of failed status to new devices */ 354 /* ensure propagation of failed status to new devices */
566 zfcp_erp_port_failed(port, 14, NULL); 355 zfcp_erp_port_failed(port, 14, NULL);
567 retval = -EIO; 356 return -EIO;
568 goto out;
569 } 357 }
570 358
571 retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, 359 return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
572 port->adapter, port, NULL, id, ref); 360 port->adapter, port, NULL, id, ref);
573
574 out:
575 return retval;
576} 361}
577 362
578/** 363/**
579 * zfcp_erp_port_reopen - initiate reopen of a remote port 364 * zfcp_erp_port_reopen - trigger remote port recovery
580 * @port: port to be reopened 365 * @port: port to recover
581 * @clear_mask: specifies flags in port status to be cleared 366 * @clear_mask: flags in port status to be cleared
582 * Return: 0 on success, < 0 on error
583 * 367 *
584 * This is a wrappper function for zfcp_erp_port_reopen_internal. It ensures 368 * Returns 0 if recovery has been triggered, < 0 if not.
585 * correct locking. An error recovery task is initiated to do the reopen.
586 * To wait for the completion of the reopen zfcp_erp_wait should be used.
587 */ 369 */
588int zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask, u8 id, 370int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id, void *ref)
589 void *ref)
590{ 371{
591 int retval;
592 unsigned long flags; 372 unsigned long flags;
373 int retval;
593 struct zfcp_adapter *adapter = port->adapter; 374 struct zfcp_adapter *adapter = port->adapter;
594 375
595 read_lock_irqsave(&zfcp_data.config_lock, flags); 376 read_lock_irqsave(&zfcp_data.config_lock, flags);
596 write_lock(&adapter->erp_lock); 377 write_lock(&adapter->erp_lock);
597 retval = zfcp_erp_port_reopen_internal(port, clear_mask, id, ref); 378 retval = _zfcp_erp_port_reopen(port, clear, id, ref);
598 write_unlock(&adapter->erp_lock); 379 write_unlock(&adapter->erp_lock);
599 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 380 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
600 381
601 return retval; 382 return retval;
602} 383}
603 384
604/* 385static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask)
605 * function:
606 *
607 * purpose: called if a unit is to be opened
608 * initiates Reopen recovery which is done
609 * asynchronously
610 *
611 * returns: 0 - initiated action successfully
612 * <0 - failed to initiate action
613 */
614static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask,
615 u8 id, void *ref)
616{ 386{
617 int retval; 387 zfcp_erp_modify_unit_status(unit, 19, NULL,
618 struct zfcp_adapter *adapter = unit->port->adapter; 388 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
389 ZFCP_CLEAR);
390}
619 391
620 ZFCP_LOG_DEBUG("reopen of unit 0x%016Lx on port 0x%016Lx " 392static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, u8 id,
621 "on adapter %s\n", unit->fcp_lun, 393 void *ref)
622 unit->port->wwpn, zfcp_get_busid_by_unit(unit)); 394{
395 struct zfcp_adapter *adapter = unit->port->adapter;
623 396
624 zfcp_erp_unit_block(unit, clear_mask); 397 zfcp_erp_unit_block(unit, clear);
625 398
626 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) { 399 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
627 ZFCP_LOG_DEBUG("skipped reopen of failed unit 0x%016Lx " 400 return;
628 "on port 0x%016Lx on adapter %s\n",
629 unit->fcp_lun, unit->port->wwpn,
630 zfcp_get_busid_by_unit(unit));
631 retval = -EIO;
632 goto out;
633 }
634 401
635 retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT, 402 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT,
636 adapter, unit->port, unit, id, ref); 403 adapter, unit->port, unit, id, ref);
637 out:
638 return retval;
639} 404}
640 405
641/** 406/**
@@ -643,987 +408,182 @@ static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask,
643 * @unit: unit to be reopened 408 * @unit: unit to be reopened
644 * @clear_mask: specifies flags in unit status to be cleared 409 * @clear_mask: specifies flags in unit status to be cleared
645 * Return: 0 on success, < 0 on error 410 * Return: 0 on success, < 0 on error
646 *
647 * This is a wrappper for zfcp_erp_unit_reopen_internal. It ensures correct
648 * locking. An error recovery task is initiated to do the reopen.
649 * To wait for the completion of the reopen zfcp_erp_wait should be used.
650 */ 411 */
651int zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask, u8 id, 412void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, u8 id, void *ref)
652 void *ref)
653{ 413{
654 int retval;
655 unsigned long flags; 414 unsigned long flags;
656 struct zfcp_adapter *adapter; 415 struct zfcp_port *port = unit->port;
657 struct zfcp_port *port; 416 struct zfcp_adapter *adapter = port->adapter;
658
659 port = unit->port;
660 adapter = port->adapter;
661 417
662 read_lock_irqsave(&zfcp_data.config_lock, flags); 418 read_lock_irqsave(&zfcp_data.config_lock, flags);
663 write_lock(&adapter->erp_lock); 419 write_lock(&adapter->erp_lock);
664 retval = zfcp_erp_unit_reopen_internal(unit, clear_mask, id, ref); 420 _zfcp_erp_unit_reopen(unit, clear, id, ref);
665 write_unlock(&adapter->erp_lock); 421 write_unlock(&adapter->erp_lock);
666 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 422 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
667
668 return retval;
669} 423}
670 424
671/** 425static int status_change_set(unsigned long mask, atomic_t *status)
672 * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests
673 */
674static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
675{
676 zfcp_erp_modify_adapter_status(adapter, 15, NULL,
677 ZFCP_STATUS_COMMON_UNBLOCKED |
678 clear_mask, ZFCP_CLEAR);
679}
680
681/* FIXME: isn't really atomic */
682/*
683 * returns the mask which has not been set so far, i.e.
684 * 0 if no bit has been changed, !0 if some bit has been changed
685 */
686static int atomic_test_and_set_mask(unsigned long mask, atomic_t *v)
687{ 426{
688 int changed_bits = (atomic_read(v) /*XOR*/^ mask) & mask; 427 return (atomic_read(status) ^ mask) & mask;
689 atomic_set_mask(mask, v);
690 return changed_bits;
691} 428}
692 429
693/* FIXME: isn't really atomic */ 430static int status_change_clear(unsigned long mask, atomic_t *status)
694/*
695 * returns the mask which has not been cleared so far, i.e.
696 * 0 if no bit has been changed, !0 if some bit has been changed
697 */
698static int atomic_test_and_clear_mask(unsigned long mask, atomic_t *v)
699{ 431{
700 int changed_bits = atomic_read(v) & mask; 432 return atomic_read(status) & mask;
701 atomic_clear_mask(mask, v);
702 return changed_bits;
703} 433}
704 434
705/**
706 * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests
707 */
708static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) 435static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
709{ 436{
710 if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, 437 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
711 &adapter->status))
712 zfcp_rec_dbf_event_adapter(16, NULL, adapter); 438 zfcp_rec_dbf_event_adapter(16, NULL, adapter);
439 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
713} 440}
714 441
715/* 442static void zfcp_erp_port_unblock(struct zfcp_port *port)
716 * function:
717 *
718 * purpose: disable I/O,
719 * return any open requests and clean them up,
720 * aim: no pending and incoming I/O
721 *
722 * returns:
723 */
724static void
725zfcp_erp_port_block(struct zfcp_port *port, int clear_mask)
726{
727 zfcp_erp_modify_port_status(port, 17, NULL,
728 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
729 ZFCP_CLEAR);
730}
731
732/*
733 * function:
734 *
735 * purpose: enable I/O
736 *
737 * returns:
738 */
739static void
740zfcp_erp_port_unblock(struct zfcp_port *port)
741{ 443{
742 if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, 444 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
743 &port->status))
744 zfcp_rec_dbf_event_port(18, NULL, port); 445 zfcp_rec_dbf_event_port(18, NULL, port);
446 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
745} 447}
746 448
747/* 449static void zfcp_erp_unit_unblock(struct zfcp_unit *unit)
748 * function:
749 *
750 * purpose: disable I/O,
751 * return any open requests and clean them up,
752 * aim: no pending and incoming I/O
753 *
754 * returns:
755 */
756static void
757zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask)
758{
759 zfcp_erp_modify_unit_status(unit, 19, NULL,
760 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
761 ZFCP_CLEAR);
762}
763
764/*
765 * function:
766 *
767 * purpose: enable I/O
768 *
769 * returns:
770 */
771static void
772zfcp_erp_unit_unblock(struct zfcp_unit *unit)
773{ 450{
774 if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, 451 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))
775 &unit->status))
776 zfcp_rec_dbf_event_unit(20, NULL, unit); 452 zfcp_rec_dbf_event_unit(20, NULL, unit);
453 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
777} 454}
778 455
779static void 456static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
780zfcp_erp_action_ready(struct zfcp_erp_action *erp_action)
781{ 457{
782 struct zfcp_adapter *adapter = erp_action->adapter; 458 list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
783 459 zfcp_rec_dbf_event_action(145, erp_action);
784 zfcp_erp_action_to_ready(erp_action);
785 up(&adapter->erp_ready_sem);
786 zfcp_rec_dbf_event_thread(2, adapter, 0);
787} 460}
788 461
789/* 462static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
790 * function:
791 *
792 * purpose:
793 *
794 * returns: <0 erp_action not found in any list
795 * ZFCP_ERP_ACTION_READY erp_action is in ready list
796 * ZFCP_ERP_ACTION_RUNNING erp_action is in running list
797 *
798 * locks: erp_lock must be held
799 */
800static int
801zfcp_erp_action_exists(struct zfcp_erp_action *erp_action)
802{ 463{
803 int retval = -EINVAL; 464 struct zfcp_adapter *adapter = act->adapter;
804 struct list_head *entry;
805 struct zfcp_erp_action *entry_erp_action;
806 struct zfcp_adapter *adapter = erp_action->adapter;
807
808 /* search in running list */
809 list_for_each(entry, &adapter->erp_running_head) {
810 entry_erp_action =
811 list_entry(entry, struct zfcp_erp_action, list);
812 if (entry_erp_action == erp_action) {
813 retval = ZFCP_ERP_ACTION_RUNNING;
814 goto out;
815 }
816 }
817 /* search in ready list */
818 list_for_each(entry, &adapter->erp_ready_head) {
819 entry_erp_action =
820 list_entry(entry, struct zfcp_erp_action, list);
821 if (entry_erp_action == erp_action) {
822 retval = ZFCP_ERP_ACTION_READY;
823 goto out;
824 }
825 }
826 465
827 out: 466 if (!act->fsf_req)
828 return retval; 467 return;
829}
830
831/*
832 * purpose: checks current status of action (timed out, dismissed, ...)
833 * and does appropriate preparations (dismiss fsf request, ...)
834 *
835 * locks: called under erp_lock (disabled interrupts)
836 */
837static void
838zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
839{
840 struct zfcp_adapter *adapter = erp_action->adapter;
841 468
842 if (erp_action->fsf_req) { 469 spin_lock(&adapter->req_list_lock);
843 /* take lock to ensure that request is not deleted meanwhile */ 470 if (zfcp_reqlist_find_safe(adapter, act->fsf_req) &&
844 spin_lock(&adapter->req_list_lock); 471 act->fsf_req->erp_action == act) {
845 if (zfcp_reqlist_find_safe(adapter, erp_action->fsf_req) && 472 if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
846 erp_action->fsf_req->erp_action == erp_action) { 473 ZFCP_STATUS_ERP_TIMEDOUT)) {
847 /* fsf_req still exists */ 474 act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
848 /* dismiss fsf_req of timed out/dismissed erp_action */ 475 zfcp_rec_dbf_event_action(142, act);
849 if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
850 ZFCP_STATUS_ERP_TIMEDOUT)) {
851 erp_action->fsf_req->status |=
852 ZFCP_STATUS_FSFREQ_DISMISSED;
853 zfcp_rec_dbf_event_action(142, erp_action);
854 }
855 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
856 zfcp_rec_dbf_event_action(143, erp_action);
857 ZFCP_LOG_NORMAL("error: erp step timed out "
858 "(action=%d, fsf_req=%p)\n ",
859 erp_action->action,
860 erp_action->fsf_req);
861 }
862 /*
863 * If fsf_req is neither dismissed nor completed
864 * then keep it running asynchronously and don't mess
865 * with the association of erp_action and fsf_req.
866 */
867 if (erp_action->fsf_req->status &
868 (ZFCP_STATUS_FSFREQ_COMPLETED |
869 ZFCP_STATUS_FSFREQ_DISMISSED)) {
870 /* forget about association between fsf_req
871 and erp_action */
872 erp_action->fsf_req = NULL;
873 }
874 } else {
875 /*
876 * even if this fsf_req has gone, forget about
877 * association between erp_action and fsf_req
878 */
879 erp_action->fsf_req = NULL;
880 } 476 }
881 spin_unlock(&adapter->req_list_lock); 477 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
882 } 478 zfcp_rec_dbf_event_action(143, act);
479 if (act->fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED |
480 ZFCP_STATUS_FSFREQ_DISMISSED))
481 act->fsf_req = NULL;
482 } else
483 act->fsf_req = NULL;
484 spin_unlock(&adapter->req_list_lock);
883} 485}
884 486
885/** 487/**
886 * zfcp_erp_async_handler_nolock - complete erp_action 488 * zfcp_erp_notify - Trigger ERP action.
887 * 489 * @erp_action: ERP action to continue.
888 * Used for normal completion, time-out, dismissal and failure after 490 * @set_mask: ERP action status flags to set.
889 * low memory condition.
890 */ 491 */
891static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, 492void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
892 unsigned long set_mask)
893{
894 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
895 erp_action->status |= set_mask;
896 zfcp_erp_action_ready(erp_action);
897 } else {
898 /* action is ready or gone - nothing to do */
899 }
900}
901
902/**
903 * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking
904 */
905void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
906 unsigned long set_mask)
907{ 493{
908 struct zfcp_adapter *adapter = erp_action->adapter; 494 struct zfcp_adapter *adapter = erp_action->adapter;
909 unsigned long flags; 495 unsigned long flags;
910 496
911 write_lock_irqsave(&adapter->erp_lock, flags); 497 write_lock_irqsave(&adapter->erp_lock, flags);
912 zfcp_erp_async_handler_nolock(erp_action, set_mask); 498 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
913 write_unlock_irqrestore(&adapter->erp_lock, flags); 499 erp_action->status |= set_mask;
914}
915
916/*
917 * purpose: is called for erp_action which was slept waiting for
918 * memory becoming avaliable,
919 * will trigger that this action will be continued
920 */
921static void
922zfcp_erp_memwait_handler(unsigned long data)
923{
924 struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
925
926 zfcp_erp_async_handler(erp_action, 0);
927}
928
929/*
930 * purpose: is called if an asynchronous erp step timed out,
931 * action gets an appropriate flag and will be processed
932 * accordingly
933 */
934static void zfcp_erp_timeout_handler(unsigned long data)
935{
936 struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
937
938 zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT);
939}
940
941/**
942 * zfcp_erp_action_dismiss - dismiss an erp_action
943 *
944 * adapter->erp_lock must be held
945 *
946 * Dismissal of an erp_action is usually required if an erp_action of
947 * higher priority is generated.
948 */
949static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
950{
951 erp_action->status |= ZFCP_STATUS_ERP_DISMISSED;
952 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING)
953 zfcp_erp_action_ready(erp_action); 500 zfcp_erp_action_ready(erp_action);
954}
955
956int
957zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
958{
959 int retval = 0;
960
961 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
962
963 retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
964 if (retval < 0) {
965 ZFCP_LOG_NORMAL("error: creation of erp thread failed for "
966 "adapter %s\n",
967 zfcp_get_busid_by_adapter(adapter));
968 } else {
969 wait_event(adapter->erp_thread_wqh,
970 atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
971 &adapter->status));
972 } 501 }
973 502 write_unlock_irqrestore(&adapter->erp_lock, flags);
974 return (retval < 0);
975}
976
977/*
978 * function:
979 *
980 * purpose:
981 *
982 * returns:
983 *
984 * context: process (i.e. proc-fs or rmmod/insmod)
985 *
986 * note: The caller of this routine ensures that the specified
987 * adapter has been shut down and that this operation
988 * has been completed. Thus, there are no pending erp_actions
989 * which would need to be handled here.
990 */
991int
992zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
993{
994 int retval = 0;
995
996 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
997 up(&adapter->erp_ready_sem);
998 zfcp_rec_dbf_event_thread(2, adapter, 1);
999
1000 wait_event(adapter->erp_thread_wqh,
1001 !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
1002 &adapter->status));
1003
1004 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
1005 &adapter->status);
1006
1007 return retval;
1008}
1009
1010/*
1011 * purpose: is run as a kernel thread,
1012 * goes through list of error recovery actions of associated adapter
1013 * and delegates single action to execution
1014 *
1015 * returns: 0
1016 */
1017static int
1018zfcp_erp_thread(void *data)
1019{
1020 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
1021 struct list_head *next;
1022 struct zfcp_erp_action *erp_action;
1023 unsigned long flags;
1024
1025 daemonize("zfcperp%s", zfcp_get_busid_by_adapter(adapter));
1026 /* Block all signals */
1027 siginitsetinv(&current->blocked, 0);
1028 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1029 wake_up(&adapter->erp_thread_wqh);
1030
1031 while (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
1032 &adapter->status)) {
1033
1034 write_lock_irqsave(&adapter->erp_lock, flags);
1035 next = adapter->erp_ready_head.next;
1036 write_unlock_irqrestore(&adapter->erp_lock, flags);
1037
1038 if (next != &adapter->erp_ready_head) {
1039 erp_action =
1040 list_entry(next, struct zfcp_erp_action, list);
1041 /*
1042 * process action (incl. [re]moving it
1043 * from 'ready' queue)
1044 */
1045 zfcp_erp_strategy(erp_action);
1046 }
1047
1048 /*
1049 * sleep as long as there is nothing to do, i.e.
1050 * no action in 'ready' queue to be processed and
1051 * thread is not to be killed
1052 */
1053 zfcp_rec_dbf_event_thread(4, adapter, 1);
1054 down_interruptible(&adapter->erp_ready_sem);
1055 zfcp_rec_dbf_event_thread(5, adapter, 1);
1056 }
1057
1058 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1059 wake_up(&adapter->erp_thread_wqh);
1060
1061 return 0;
1062}
1063
1064/*
1065 * function:
1066 *
1067 * purpose: drives single error recovery action and schedules higher and
1068 * subordinate actions, if necessary
1069 *
1070 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
1071 * ZFCP_ERP_SUCCEEDED - action finished successfully (deqd)
1072 * ZFCP_ERP_FAILED - action finished unsuccessfully (deqd)
1073 * ZFCP_ERP_EXIT - action finished (dequeued), offline
1074 * ZFCP_ERP_DISMISSED - action canceled (dequeued)
1075 */
1076static int
1077zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1078{
1079 int retval = 0;
1080 struct zfcp_adapter *adapter = erp_action->adapter;
1081 struct zfcp_port *port = erp_action->port;
1082 struct zfcp_unit *unit = erp_action->unit;
1083 int action = erp_action->action;
1084 u32 status = erp_action->status;
1085 unsigned long flags;
1086
1087 /* serialise dismissing, timing out, moving, enqueueing */
1088 read_lock_irqsave(&zfcp_data.config_lock, flags);
1089 write_lock(&adapter->erp_lock);
1090
1091 /* dequeue dismissed action and leave, if required */
1092 retval = zfcp_erp_strategy_check_action(erp_action, retval);
1093 if (retval == ZFCP_ERP_DISMISSED) {
1094 goto unlock;
1095 }
1096
1097 /*
1098 * move action to 'running' queue before processing it
1099 * (to avoid a race condition regarding moving the
1100 * action to the 'running' queue and back)
1101 */
1102 zfcp_erp_action_to_running(erp_action);
1103
1104 /*
1105 * try to process action as far as possible,
1106 * no lock to allow for blocking operations (kmalloc, qdio, ...),
1107 * afterwards the lock is required again for the following reasons:
1108 * - dequeueing of finished action and enqueueing of
1109 * follow-up actions must be atomic so that any other
1110 * reopen-routine does not believe there is nothing to do
1111 * and that it is safe to enqueue something else,
1112 * - we want to force any control thread which is dismissing
1113 * actions to finish this before we decide about
1114 * necessary steps to be taken here further
1115 */
1116 write_unlock(&adapter->erp_lock);
1117 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1118 retval = zfcp_erp_strategy_do_action(erp_action);
1119 read_lock_irqsave(&zfcp_data.config_lock, flags);
1120 write_lock(&adapter->erp_lock);
1121
1122 /*
1123 * check for dismissed status again to avoid follow-up actions,
1124 * failing of targets and so on for dismissed actions,
1125 * we go through down() here because there has been an up()
1126 */
1127 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
1128 retval = ZFCP_ERP_CONTINUES;
1129
1130 switch (retval) {
1131 case ZFCP_ERP_NOMEM:
1132 /* no memory to continue immediately, let it sleep */
1133 if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
1134 ++adapter->erp_low_mem_count;
1135 erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
1136 }
1137 /* This condition is true if there is no memory available
1138 for any erp_action on this adapter. This implies that there
1139 are no elements in the memory pool(s) left for erp_actions.
1140 This might happen if an erp_action that used a memory pool
1141 element was timed out.
1142 */
1143 if (adapter->erp_total_count == adapter->erp_low_mem_count) {
1144 ZFCP_LOG_NORMAL("error: no mempool elements available, "
1145 "restarting I/O on adapter %s "
1146 "to free mempool\n",
1147 zfcp_get_busid_by_adapter(adapter));
1148 zfcp_erp_adapter_reopen_internal(adapter, 0, 66, NULL);
1149 } else {
1150 retval = zfcp_erp_strategy_memwait(erp_action);
1151 }
1152 goto unlock;
1153 case ZFCP_ERP_CONTINUES:
1154 /* leave since this action runs asynchronously */
1155 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
1156 --adapter->erp_low_mem_count;
1157 erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
1158 }
1159 goto unlock;
1160 }
1161 /* ok, finished action (whatever its result is) */
1162
1163 /* check for unrecoverable targets */
1164 retval = zfcp_erp_strategy_check_target(erp_action, retval);
1165
1166 /* action must be dequeued (here to allow for further ones) */
1167 zfcp_erp_action_dequeue(erp_action);
1168
1169 /*
1170 * put this target through the erp mill again if someone has
1171 * requested to change the status of a target being online
1172 * to offline or the other way around
1173 * (old retval is preserved if nothing has to be done here)
1174 */
1175 retval = zfcp_erp_strategy_statechange(action, status, adapter,
1176 port, unit, retval);
1177
1178 /*
1179 * leave if target is in permanent error state or if
1180 * action is repeated in order to process state change
1181 */
1182 if (retval == ZFCP_ERP_EXIT) {
1183 goto unlock;
1184 }
1185
1186 /* trigger follow up actions */
1187 zfcp_erp_strategy_followup_actions(action, adapter, port, unit, retval);
1188
1189 unlock:
1190 write_unlock(&adapter->erp_lock);
1191 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1192
1193 if (retval != ZFCP_ERP_CONTINUES)
1194 zfcp_erp_action_cleanup(action, adapter, port, unit, retval);
1195
1196 /*
1197 * a few tasks remain when the erp queues are empty
1198 * (don't do that if the last action evaluated was dismissed
1199 * since this clearly indicates that there is more to come) :
1200 * - close the name server port if it is open yet
1201 * (enqueues another [probably] final action)
1202 * - otherwise, wake up whoever wants to be woken when we are
1203 * done with erp
1204 */
1205 if (retval != ZFCP_ERP_DISMISSED)
1206 zfcp_erp_strategy_check_queues(adapter);
1207
1208 return retval;
1209} 503}
1210 504
1211/* 505/**
1212 * function: 506 * zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request
1213 * 507 * @data: ERP action (from timer data)
1214 * purpose:
1215 *
1216 * returns: ZFCP_ERP_DISMISSED - if action has been dismissed
1217 * retval - otherwise
1218 */ 508 */
1219static int 509void zfcp_erp_timeout_handler(unsigned long data)
1220zfcp_erp_strategy_check_action(struct zfcp_erp_action *erp_action, int retval)
1221{ 510{
1222 zfcp_erp_strategy_check_fsfreq(erp_action); 511 struct zfcp_erp_action *act = (struct zfcp_erp_action *) data;
1223 512 zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
1224 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
1225 zfcp_erp_action_dequeue(erp_action);
1226 retval = ZFCP_ERP_DISMISSED;
1227 }
1228
1229 return retval;
1230} 513}
1231 514
1232static int 515static void zfcp_erp_memwait_handler(unsigned long data)
1233zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
1234{ 516{
1235 int retval = ZFCP_ERP_FAILED; 517 zfcp_erp_notify((struct zfcp_erp_action *)data, 0);
1236
1237 /*
1238 * try to execute/continue action as far as possible,
1239 * note: no lock in subsequent strategy routines
1240 * (this allows these routine to call schedule, e.g.
1241 * kmalloc with such flags or qdio_initialize & friends)
1242 * Note: in case of timeout, the separate strategies will fail
1243 * anyhow. No need for a special action. Even worse, a nameserver
1244 * failure would not wake up waiting ports without the call.
1245 */
1246 switch (erp_action->action) {
1247
1248 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1249 retval = zfcp_erp_adapter_strategy(erp_action);
1250 break;
1251
1252 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1253 retval = zfcp_erp_port_forced_strategy(erp_action);
1254 break;
1255
1256 case ZFCP_ERP_ACTION_REOPEN_PORT:
1257 retval = zfcp_erp_port_strategy(erp_action);
1258 break;
1259
1260 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1261 retval = zfcp_erp_unit_strategy(erp_action);
1262 break;
1263
1264 default:
1265 ZFCP_LOG_NORMAL("bug: unknown erp action requested on "
1266 "adapter %s (action=%d)\n",
1267 zfcp_get_busid_by_adapter(erp_action->adapter),
1268 erp_action->action);
1269 }
1270
1271 return retval;
1272} 518}
1273 519
1274/* 520static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
1275 * function:
1276 *
1277 * purpose: triggers retry of this action after a certain amount of time
1278 * by means of timer provided by erp_action
1279 *
1280 * returns: ZFCP_ERP_CONTINUES - erp_action sleeps in erp running queue
1281 */
1282static int
1283zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
1284{ 521{
1285 int retval = ZFCP_ERP_CONTINUES;
1286
1287 init_timer(&erp_action->timer); 522 init_timer(&erp_action->timer);
1288 erp_action->timer.function = zfcp_erp_memwait_handler; 523 erp_action->timer.function = zfcp_erp_memwait_handler;
1289 erp_action->timer.data = (unsigned long) erp_action; 524 erp_action->timer.data = (unsigned long) erp_action;
1290 erp_action->timer.expires = jiffies + ZFCP_ERP_MEMWAIT_TIMEOUT; 525 erp_action->timer.expires = jiffies + HZ;
1291 add_timer(&erp_action->timer); 526 add_timer(&erp_action->timer);
1292
1293 return retval;
1294} 527}
1295 528
1296/* 529static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
1297 * function: zfcp_erp_adapter_failed 530 int clear, u8 id, void *ref)
1298 *
1299 * purpose: sets the adapter and all underlying devices to ERP_FAILED
1300 *
1301 */
1302void
1303zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
1304{
1305 zfcp_erp_modify_adapter_status(adapter, id, ref,
1306 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1307 ZFCP_LOG_NORMAL("adapter erp failed on adapter %s\n",
1308 zfcp_get_busid_by_adapter(adapter));
1309}
1310
1311/*
1312 * function: zfcp_erp_port_failed
1313 *
1314 * purpose: sets the port and all underlying devices to ERP_FAILED
1315 *
1316 */
1317void
1318zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
1319{
1320 zfcp_erp_modify_port_status(port, id, ref,
1321 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1322
1323 if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
1324 ZFCP_LOG_NORMAL("port erp failed (adapter %s, "
1325 "port d_id=0x%06x)\n",
1326 zfcp_get_busid_by_port(port), port->d_id);
1327 else
1328 ZFCP_LOG_NORMAL("port erp failed (adapter %s, wwpn=0x%016Lx)\n",
1329 zfcp_get_busid_by_port(port), port->wwpn);
1330}
1331
1332/*
1333 * function: zfcp_erp_unit_failed
1334 *
1335 * purpose: sets the unit to ERP_FAILED
1336 *
1337 */
1338void
1339zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref)
1340{
1341 zfcp_erp_modify_unit_status(unit, id, ref,
1342 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1343
1344 ZFCP_LOG_NORMAL("unit erp failed on unit 0x%016Lx on port 0x%016Lx "
1345 " on adapter %s\n", unit->fcp_lun,
1346 unit->port->wwpn, zfcp_get_busid_by_unit(unit));
1347}
1348
1349/*
1350 * function: zfcp_erp_strategy_check_target
1351 *
1352 * purpose: increments the erp action count on the device currently in
1353 * recovery if the action failed or resets the count in case of
1354 * success. If a maximum count is exceeded the device is marked
1355 * as ERP_FAILED.
1356 * The 'blocked' state of a target which has been recovered
1357 * successfully is reset.
1358 *
1359 * returns: ZFCP_ERP_CONTINUES - action continues (not considered)
1360 * ZFCP_ERP_SUCCEEDED - action finished successfully
1361 * ZFCP_ERP_EXIT - action failed and will not continue
1362 */
1363static int
1364zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action, int result)
1365{
1366 struct zfcp_adapter *adapter = erp_action->adapter;
1367 struct zfcp_port *port = erp_action->port;
1368 struct zfcp_unit *unit = erp_action->unit;
1369
1370 switch (erp_action->action) {
1371
1372 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1373 result = zfcp_erp_strategy_check_unit(unit, result);
1374 break;
1375
1376 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1377 case ZFCP_ERP_ACTION_REOPEN_PORT:
1378 result = zfcp_erp_strategy_check_port(port, result);
1379 break;
1380
1381 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1382 result = zfcp_erp_strategy_check_adapter(adapter, result);
1383 break;
1384 }
1385
1386 return result;
1387}
1388
1389static int
1390zfcp_erp_strategy_statechange(int action,
1391 u32 status,
1392 struct zfcp_adapter *adapter,
1393 struct zfcp_port *port,
1394 struct zfcp_unit *unit, int retval)
1395{
1396 switch (action) {
1397
1398 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1399 if (zfcp_erp_strategy_statechange_detected(&adapter->status,
1400 status)) {
1401 zfcp_erp_adapter_reopen_internal(adapter,
1402 ZFCP_STATUS_COMMON_ERP_FAILED,
1403 67, NULL);
1404 retval = ZFCP_ERP_EXIT;
1405 }
1406 break;
1407
1408 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1409 case ZFCP_ERP_ACTION_REOPEN_PORT:
1410 if (zfcp_erp_strategy_statechange_detected(&port->status,
1411 status)) {
1412 zfcp_erp_port_reopen_internal(port,
1413 ZFCP_STATUS_COMMON_ERP_FAILED,
1414 68, NULL);
1415 retval = ZFCP_ERP_EXIT;
1416 }
1417 break;
1418
1419 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1420 if (zfcp_erp_strategy_statechange_detected(&unit->status,
1421 status)) {
1422 zfcp_erp_unit_reopen_internal(unit,
1423 ZFCP_STATUS_COMMON_ERP_FAILED,
1424 69, NULL);
1425 retval = ZFCP_ERP_EXIT;
1426 }
1427 break;
1428 }
1429
1430 return retval;
1431}
1432
1433static int
1434zfcp_erp_strategy_statechange_detected(atomic_t * target_status, u32 erp_status)
1435{ 531{
1436 return 532 struct zfcp_port *port;
1437 /* take it online */
1438 (atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, target_status) &&
1439 (ZFCP_STATUS_ERP_CLOSE_ONLY & erp_status)) ||
1440 /* take it offline */
1441 (!atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, target_status) &&
1442 !(ZFCP_STATUS_ERP_CLOSE_ONLY & erp_status));
1443}
1444
1445static int
1446zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
1447{
1448 switch (result) {
1449 case ZFCP_ERP_SUCCEEDED :
1450 atomic_set(&unit->erp_counter, 0);
1451 zfcp_erp_unit_unblock(unit);
1452 break;
1453 case ZFCP_ERP_FAILED :
1454 atomic_inc(&unit->erp_counter);
1455 if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS)
1456 zfcp_erp_unit_failed(unit, 21, NULL);
1457 break;
1458 case ZFCP_ERP_EXIT :
1459 /* nothing */
1460 break;
1461 }
1462
1463 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) {
1464 zfcp_erp_unit_block(unit, 0); /* for ZFCP_ERP_SUCCEEDED */
1465 result = ZFCP_ERP_EXIT;
1466 }
1467
1468 return result;
1469}
1470
1471static int
1472zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
1473{
1474 switch (result) {
1475 case ZFCP_ERP_SUCCEEDED :
1476 atomic_set(&port->erp_counter, 0);
1477 zfcp_erp_port_unblock(port);
1478 break;
1479 case ZFCP_ERP_FAILED :
1480 atomic_inc(&port->erp_counter);
1481 if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS)
1482 zfcp_erp_port_failed(port, 22, NULL);
1483 break;
1484 case ZFCP_ERP_EXIT :
1485 /* nothing */
1486 break;
1487 }
1488
1489 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
1490 zfcp_erp_port_block(port, 0); /* for ZFCP_ERP_SUCCEEDED */
1491 result = ZFCP_ERP_EXIT;
1492 }
1493 533
1494 return result; 534 list_for_each_entry(port, &adapter->port_list_head, list)
535 if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA))
536 _zfcp_erp_port_reopen(port, clear, id, ref);
1495} 537}
1496 538
1497static int 539static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, u8 id,
1498zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, int result) 540 void *ref)
1499{ 541{
1500 switch (result) { 542 struct zfcp_unit *unit;
1501 case ZFCP_ERP_SUCCEEDED :
1502 atomic_set(&adapter->erp_counter, 0);
1503 zfcp_erp_adapter_unblock(adapter);
1504 break;
1505 case ZFCP_ERP_FAILED :
1506 atomic_inc(&adapter->erp_counter);
1507 if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS)
1508 zfcp_erp_adapter_failed(adapter, 23, NULL);
1509 break;
1510 case ZFCP_ERP_EXIT :
1511 /* nothing */
1512 break;
1513 }
1514
1515 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
1516 zfcp_erp_adapter_block(adapter, 0); /* for ZFCP_ERP_SUCCEEDED */
1517 result = ZFCP_ERP_EXIT;
1518 }
1519
1520 return result;
1521}
1522
1523struct zfcp_erp_add_work {
1524 struct zfcp_unit *unit;
1525 struct work_struct work;
1526};
1527 543
1528/** 544 list_for_each_entry(unit, &port->unit_list_head, list)
1529 * zfcp_erp_scsi_scan 545 _zfcp_erp_unit_reopen(unit, clear, id, ref);
1530 * @data: pointer to a struct zfcp_erp_add_work
1531 *
1532 * Registers a logical unit with the SCSI stack.
1533 */
1534static void zfcp_erp_scsi_scan(struct work_struct *work)
1535{
1536 struct zfcp_erp_add_work *p =
1537 container_of(work, struct zfcp_erp_add_work, work);
1538 struct zfcp_unit *unit = p->unit;
1539 struct fc_rport *rport = unit->port->rport;
1540 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
1541 unit->scsi_lun, 0);
1542 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1543 zfcp_unit_put(unit);
1544 kfree(p);
1545} 546}
1546 547
1547/** 548static void zfcp_erp_strategy_followup_actions(struct zfcp_erp_action *act)
1548 * zfcp_erp_schedule_work
1549 * @unit: pointer to unit which should be registered with SCSI stack
1550 *
1551 * Schedules work which registers a unit with the SCSI stack
1552 */
1553static void
1554zfcp_erp_schedule_work(struct zfcp_unit *unit)
1555{ 549{
1556 struct zfcp_erp_add_work *p; 550 struct zfcp_adapter *adapter = act->adapter;
551 struct zfcp_port *port = act->port;
552 struct zfcp_unit *unit = act->unit;
553 u32 status = act->status;
1557 554
1558 p = kzalloc(sizeof(*p), GFP_KERNEL);
1559 if (!p) {
1560 ZFCP_LOG_NORMAL("error: Out of resources. Could not register "
1561 "the FCP-LUN 0x%Lx connected to "
1562 "the port with WWPN 0x%Lx connected to "
1563 "the adapter %s with the SCSI stack.\n",
1564 unit->fcp_lun,
1565 unit->port->wwpn,
1566 zfcp_get_busid_by_unit(unit));
1567 return;
1568 }
1569
1570 zfcp_unit_get(unit);
1571 atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1572 INIT_WORK(&p->work, zfcp_erp_scsi_scan);
1573 p->unit = unit;
1574 schedule_work(&p->work);
1575}
1576
1577/*
1578 * function:
1579 *
1580 * purpose: remaining things in good cases,
1581 * escalation in bad cases
1582 *
1583 * returns:
1584 */
1585static int
1586zfcp_erp_strategy_followup_actions(int action,
1587 struct zfcp_adapter *adapter,
1588 struct zfcp_port *port,
1589 struct zfcp_unit *unit, int status)
1590{
1591 /* initiate follow-up actions depending on success of finished action */ 555 /* initiate follow-up actions depending on success of finished action */
1592 switch (action) { 556 switch (act->action) {
1593 557
1594 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 558 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1595 if (status == ZFCP_ERP_SUCCEEDED) 559 if (status == ZFCP_ERP_SUCCEEDED)
1596 zfcp_erp_port_reopen_all_internal(adapter, 0, 70, NULL); 560 _zfcp_erp_port_reopen_all(adapter, 0, 70, NULL);
1597 else 561 else
1598 zfcp_erp_adapter_reopen_internal(adapter, 0, 71, NULL); 562 _zfcp_erp_adapter_reopen(adapter, 0, 71, NULL);
1599 break; 563 break;
1600 564
1601 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 565 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1602 if (status == ZFCP_ERP_SUCCEEDED) 566 if (status == ZFCP_ERP_SUCCEEDED)
1603 zfcp_erp_port_reopen_internal(port, 0, 72, NULL); 567 _zfcp_erp_port_reopen(port, 0, 72, NULL);
1604 else 568 else
1605 zfcp_erp_adapter_reopen_internal(adapter, 0, 73, NULL); 569 _zfcp_erp_adapter_reopen(adapter, 0, 73, NULL);
1606 break; 570 break;
1607 571
1608 case ZFCP_ERP_ACTION_REOPEN_PORT: 572 case ZFCP_ERP_ACTION_REOPEN_PORT:
1609 if (status == ZFCP_ERP_SUCCEEDED) 573 if (status == ZFCP_ERP_SUCCEEDED)
1610 zfcp_erp_unit_reopen_all_internal(port, 0, 74, NULL); 574 _zfcp_erp_unit_reopen_all(port, 0, 74, NULL);
1611 else 575 else
1612 zfcp_erp_port_forced_reopen_internal(port, 0, 75, NULL); 576 _zfcp_erp_port_forced_reopen(port, 0, 75, NULL);
1613 break; 577 break;
1614 578
1615 case ZFCP_ERP_ACTION_REOPEN_UNIT: 579 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1616 /* Nothing to do if status == ZFCP_ERP_SUCCEEDED */
1617 if (status != ZFCP_ERP_SUCCEEDED) 580 if (status != ZFCP_ERP_SUCCEEDED)
1618 zfcp_erp_port_reopen_internal(unit->port, 0, 76, NULL); 581 _zfcp_erp_port_reopen(unit->port, 0, 76, NULL);
1619 break; 582 break;
1620 } 583 }
1621
1622 return 0;
1623} 584}
1624 585
1625static int 586static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
1626zfcp_erp_strategy_check_queues(struct zfcp_adapter *adapter)
1627{ 587{
1628 unsigned long flags; 588 unsigned long flags;
1629 589
@@ -1637,1277 +597,622 @@ zfcp_erp_strategy_check_queues(struct zfcp_adapter *adapter)
1637 } 597 }
1638 read_unlock(&adapter->erp_lock); 598 read_unlock(&adapter->erp_lock);
1639 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 599 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1640
1641 return 0;
1642} 600}
1643 601
1644/** 602static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
1645 * zfcp_erp_wait - wait for completion of error recovery on an adapter
1646 * @adapter: adapter for which to wait for completion of its error recovery
1647 * Return: 0
1648 */
1649int
1650zfcp_erp_wait(struct zfcp_adapter *adapter)
1651{ 603{
1652 int retval = 0; 604 if (zfcp_qdio_open(act->adapter))
1653 605 return ZFCP_ERP_FAILED;
1654 wait_event(adapter->erp_done_wqh, 606 init_waitqueue_head(&act->adapter->request_wq);
1655 !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, 607 atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status);
1656 &adapter->status)); 608 return ZFCP_ERP_SUCCEEDED;
1657
1658 return retval;
1659} 609}
1660 610
1661void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, u8 id, 611static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
1662 void *ref, u32 mask, int set_or_clear)
1663{ 612{
1664 struct zfcp_port *port; 613 struct zfcp_port *port;
1665 u32 changed, common_mask = mask & ZFCP_COMMON_FLAGS; 614 port = zfcp_port_enqueue(adapter, adapter->peer_wwpn, 0,
1666 615 adapter->peer_d_id);
1667 if (set_or_clear == ZFCP_SET) { 616 if (IS_ERR(port)) /* error or port already attached */
1668 changed = atomic_test_and_set_mask(mask, &adapter->status); 617 return;
1669 } else { 618 _zfcp_erp_port_reopen(port, 0, 150, NULL);
1670 changed = atomic_test_and_clear_mask(mask, &adapter->status);
1671 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1672 atomic_set(&adapter->erp_counter, 0);
1673 }
1674 if (changed)
1675 zfcp_rec_dbf_event_adapter(id, ref, adapter);
1676
1677 /* Deal with all underlying devices, only pass common_mask */
1678 if (common_mask)
1679 list_for_each_entry(port, &adapter->port_list_head, list)
1680 zfcp_erp_modify_port_status(port, id, ref, common_mask,
1681 set_or_clear);
1682} 619}
1683 620
1684/* 621static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
1685 * function: zfcp_erp_modify_port_status
1686 *
1687 * purpose: sets the port and all underlying devices to ERP_FAILED
1688 *
1689 */
1690void zfcp_erp_modify_port_status(struct zfcp_port *port, u8 id, void *ref,
1691 u32 mask, int set_or_clear)
1692{ 622{
1693 struct zfcp_unit *unit; 623 int retries;
1694 u32 changed, common_mask = mask & ZFCP_COMMON_FLAGS; 624 int sleep = 1;
1695 625 struct zfcp_adapter *adapter = erp_action->adapter;
1696 if (set_or_clear == ZFCP_SET) {
1697 changed = atomic_test_and_set_mask(mask, &port->status);
1698 } else {
1699 changed = atomic_test_and_clear_mask(mask, &port->status);
1700 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1701 atomic_set(&port->erp_counter, 0);
1702 }
1703 if (changed)
1704 zfcp_rec_dbf_event_port(id, ref, port);
1705
1706 /* Modify status of all underlying devices, only pass common mask */
1707 if (common_mask)
1708 list_for_each_entry(unit, &port->unit_list_head, list)
1709 zfcp_erp_modify_unit_status(unit, id, ref, common_mask,
1710 set_or_clear);
1711}
1712 626
1713/* 627 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
1714 * function: zfcp_erp_modify_unit_status
1715 *
1716 * purpose: sets the unit to ERP_FAILED
1717 *
1718 */
1719void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u8 id, void *ref,
1720 u32 mask, int set_or_clear)
1721{
1722 u32 changed;
1723 628
1724 if (set_or_clear == ZFCP_SET) { 629 for (retries = 7; retries; retries--) {
1725 changed = atomic_test_and_set_mask(mask, &unit->status); 630 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
1726 } else { 631 &adapter->status);
1727 changed = atomic_test_and_clear_mask(mask, &unit->status); 632 write_lock_irq(&adapter->erp_lock);
1728 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) { 633 zfcp_erp_action_to_running(erp_action);
1729 atomic_set(&unit->erp_counter, 0); 634 write_unlock_irq(&adapter->erp_lock);
635 if (zfcp_fsf_exchange_config_data(erp_action)) {
636 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
637 &adapter->status);
638 return ZFCP_ERP_FAILED;
1730 } 639 }
1731 }
1732 if (changed)
1733 zfcp_rec_dbf_event_unit(id, ref, unit);
1734}
1735 640
1736/* 641 zfcp_rec_dbf_event_thread_lock(6, adapter);
1737 * function: 642 down(&adapter->erp_ready_sem);
1738 * 643 zfcp_rec_dbf_event_thread_lock(7, adapter);
1739 * purpose: Wrappper for zfcp_erp_port_reopen_all_internal 644 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
1740 * used to ensure the correct locking 645 break;
1741 *
1742 * returns: 0 - initiated action successfully
1743 * <0 - failed to initiate action
1744 */
1745int zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, int clear_mask,
1746 u8 id, void *ref)
1747{
1748 int retval;
1749 unsigned long flags;
1750
1751 read_lock_irqsave(&zfcp_data.config_lock, flags);
1752 write_lock(&adapter->erp_lock);
1753 retval = zfcp_erp_port_reopen_all_internal(adapter, clear_mask, id,
1754 ref);
1755 write_unlock(&adapter->erp_lock);
1756 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1757
1758 return retval;
1759}
1760 646
1761static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *adapter, 647 if (!(atomic_read(&adapter->status) &
1762 int clear_mask, u8 id, void *ref) 648 ZFCP_STATUS_ADAPTER_HOST_CON_INIT))
1763{ 649 break;
1764 int retval = 0;
1765 struct zfcp_port *port;
1766 650
1767 list_for_each_entry(port, &adapter->port_list_head, list) 651 ssleep(sleep);
1768 if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status)) 652 sleep *= 2;
1769 zfcp_erp_port_reopen_internal(port, clear_mask, id, 653 }
1770 ref);
1771 654
1772 return retval; 655 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
1773} 656 &adapter->status);
1774 657
1775/* 658 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
1776 * function: 659 return ZFCP_ERP_FAILED;
1777 *
1778 * purpose:
1779 *
1780 * returns: FIXME
1781 */
1782static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *port,
1783 int clear_mask, u8 id, void *ref)
1784{
1785 int retval = 0;
1786 struct zfcp_unit *unit;
1787 660
1788 list_for_each_entry(unit, &port->unit_list_head, list) 661 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
1789 zfcp_erp_unit_reopen_internal(unit, clear_mask, id, ref); 662 zfcp_erp_enqueue_ptp_port(adapter);
1790 663
1791 return retval; 664 return ZFCP_ERP_SUCCEEDED;
1792} 665}
1793 666
1794/* 667static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
1795 * function:
1796 *
1797 * purpose: this routine executes the 'Reopen Adapter' action
1798 * (the entire action is processed synchronously, since
1799 * there are no actions which might be run concurrently
1800 * per definition)
1801 *
1802 * returns: ZFCP_ERP_SUCCEEDED - action finished successfully
1803 * ZFCP_ERP_FAILED - action finished unsuccessfully
1804 */
1805static int
1806zfcp_erp_adapter_strategy(struct zfcp_erp_action *erp_action)
1807{ 668{
1808 int retval; 669 int ret;
1809 struct zfcp_adapter *adapter = erp_action->adapter; 670 struct zfcp_adapter *adapter = act->adapter;
1810
1811 retval = zfcp_erp_adapter_strategy_close(erp_action);
1812 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
1813 retval = ZFCP_ERP_EXIT;
1814 else
1815 retval = zfcp_erp_adapter_strategy_open(erp_action);
1816 671
1817 if (retval == ZFCP_ERP_FAILED) { 672 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
1818 ZFCP_LOG_INFO("Waiting to allow the adapter %s "
1819 "to recover itself\n",
1820 zfcp_get_busid_by_adapter(adapter));
1821 ssleep(ZFCP_TYPE2_RECOVERY_TIME);
1822 }
1823 673
1824 return retval; 674 write_lock_irq(&adapter->erp_lock);
1825} 675 zfcp_erp_action_to_running(act);
676 write_unlock_irq(&adapter->erp_lock);
1826 677
1827/* 678 ret = zfcp_fsf_exchange_port_data(act);
1828 * function: 679 if (ret == -EOPNOTSUPP)
1829 * 680 return ZFCP_ERP_SUCCEEDED;
1830 * purpose: 681 if (ret)
1831 * 682 return ZFCP_ERP_FAILED;
1832 * returns: ZFCP_ERP_SUCCEEDED - action finished successfully
1833 * ZFCP_ERP_FAILED - action finished unsuccessfully
1834 */
1835static int
1836zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *erp_action)
1837{
1838 int retval;
1839 683
1840 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, 684 zfcp_rec_dbf_event_thread_lock(8, adapter);
1841 &erp_action->adapter->status); 685 down(&adapter->erp_ready_sem);
1842 retval = zfcp_erp_adapter_strategy_generic(erp_action, 1); 686 zfcp_rec_dbf_event_thread_lock(9, adapter);
1843 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, 687 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
1844 &erp_action->adapter->status); 688 return ZFCP_ERP_FAILED;
1845 689
1846 return retval; 690 return ZFCP_ERP_SUCCEEDED;
1847} 691}
1848 692
1849/* 693static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
1850 * function:
1851 *
1852 * purpose:
1853 *
1854 * returns: ZFCP_ERP_SUCCEEDED - action finished successfully
1855 * ZFCP_ERP_FAILED - action finished unsuccessfully
1856 */
1857static int
1858zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *erp_action)
1859{ 694{
1860 int retval; 695 if (zfcp_erp_adapter_strat_fsf_xconf(act) == ZFCP_ERP_FAILED)
696 return ZFCP_ERP_FAILED;
1861 697
1862 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, 698 if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
1863 &erp_action->adapter->status); 699 return ZFCP_ERP_FAILED;
1864 retval = zfcp_erp_adapter_strategy_generic(erp_action, 0);
1865 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING,
1866 &erp_action->adapter->status);
1867 700
1868 return retval; 701 atomic_set(&act->adapter->stat_miss, 16);
702 if (zfcp_status_read_refill(act->adapter))
703 return ZFCP_ERP_FAILED;
704
705 return ZFCP_ERP_SUCCEEDED;
1869} 706}
1870 707
1871/* 708static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act,
1872 * function: zfcp_register_adapter 709 int close)
1873 *
1874 * purpose: allocate the irq associated with this devno and register
1875 * the FSF adapter with the SCSI stack
1876 *
1877 * returns:
1878 */
1879static int
1880zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close)
1881{ 710{
1882 int retval = ZFCP_ERP_SUCCEEDED; 711 int retval = ZFCP_ERP_SUCCEEDED;
712 struct zfcp_adapter *adapter = act->adapter;
1883 713
1884 if (close) 714 if (close)
1885 goto close_only; 715 goto close_only;
1886 716
1887 retval = zfcp_erp_adapter_strategy_open_qdio(erp_action); 717 retval = zfcp_erp_adapter_strategy_open_qdio(act);
1888 if (retval != ZFCP_ERP_SUCCEEDED) 718 if (retval != ZFCP_ERP_SUCCEEDED)
1889 goto failed_qdio; 719 goto failed_qdio;
1890 720
1891 retval = zfcp_erp_adapter_strategy_open_fsf(erp_action); 721 retval = zfcp_erp_adapter_strategy_open_fsf(act);
1892 if (retval != ZFCP_ERP_SUCCEEDED) 722 if (retval != ZFCP_ERP_SUCCEEDED)
1893 goto failed_openfcp; 723 goto failed_openfcp;
1894 724
1895 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &erp_action->adapter->status); 725 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status);
1896 goto out; 726 schedule_work(&act->adapter->scan_work);
727
728 return ZFCP_ERP_SUCCEEDED;
1897 729
1898 close_only: 730 close_only:
1899 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 731 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1900 &erp_action->adapter->status); 732 &act->adapter->status);
1901 733
1902 failed_openfcp: 734 failed_openfcp:
1903 zfcp_close_fsf(erp_action->adapter); 735 /* close queues to ensure that buffers are not accessed by adapter */
736 zfcp_qdio_close(adapter);
737 zfcp_fsf_req_dismiss_all(adapter);
738 adapter->fsf_req_seq_no = 0;
739 /* all ports and units are closed */
740 zfcp_erp_modify_adapter_status(adapter, 24, NULL,
741 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
1904 failed_qdio: 742 failed_qdio:
1905 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 743 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
1906 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 744 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
1907 ZFCP_STATUS_ADAPTER_XPORT_OK, 745 ZFCP_STATUS_ADAPTER_XPORT_OK,
1908 &erp_action->adapter->status); 746 &act->adapter->status);
1909 out:
1910 return retval; 747 return retval;
1911} 748}
1912 749
1913/* 750static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
1914 * function: zfcp_qdio_init
1915 *
1916 * purpose: setup QDIO operation for specified adapter
1917 *
1918 * returns: 0 - successful setup
1919 * !0 - failed setup
1920 */
1921static int
1922zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
1923{ 751{
1924 int retval; 752 int retval;
1925 int i;
1926 volatile struct qdio_buffer_element *sbale;
1927 struct zfcp_adapter *adapter = erp_action->adapter;
1928
1929 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
1930 ZFCP_LOG_NORMAL("bug: second attempt to set up QDIO on "
1931 "adapter %s\n",
1932 zfcp_get_busid_by_adapter(adapter));
1933 goto failed_sanity;
1934 }
1935
1936 if (qdio_establish(&adapter->qdio_init_data) != 0) {
1937 ZFCP_LOG_INFO("error: establishment of QDIO queues failed "
1938 "on adapter %s\n",
1939 zfcp_get_busid_by_adapter(adapter));
1940 goto failed_qdio_establish;
1941 }
1942
1943 if (qdio_activate(adapter->ccw_device, 0) != 0) {
1944 ZFCP_LOG_INFO("error: activation of QDIO queues failed "
1945 "on adapter %s\n",
1946 zfcp_get_busid_by_adapter(adapter));
1947 goto failed_qdio_activate;
1948 }
1949
1950 /*
1951 * put buffers into response queue,
1952 */
1953 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
1954 sbale = &(adapter->response_queue.buffer[i]->element[0]);
1955 sbale->length = 0;
1956 sbale->flags = SBAL_FLAGS_LAST_ENTRY;
1957 sbale->addr = NULL;
1958 }
1959
1960 ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
1961 "queue_no=%i, index_in_queue=%i, count=%i)\n",
1962 zfcp_get_busid_by_adapter(adapter),
1963 QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q);
1964
1965 retval = do_QDIO(adapter->ccw_device,
1966 QDIO_FLAG_SYNC_INPUT,
1967 0, 0, QDIO_MAX_BUFFERS_PER_Q, NULL);
1968
1969 if (retval) {
1970 ZFCP_LOG_NORMAL("bug: setup of QDIO failed (retval=%d)\n",
1971 retval);
1972 goto failed_do_qdio;
1973 } else {
1974 adapter->response_queue.free_index = 0;
1975 atomic_set(&adapter->response_queue.free_count, 0);
1976 ZFCP_LOG_DEBUG("%i buffers successfully enqueued to "
1977 "response queue\n", QDIO_MAX_BUFFERS_PER_Q);
1978 }
1979 /* set index of first avalable SBALS / number of available SBALS */
1980 adapter->request_queue.free_index = 0;
1981 atomic_set(&adapter->request_queue.free_count, QDIO_MAX_BUFFERS_PER_Q);
1982 adapter->request_queue.distance_from_int = 0;
1983
1984 /* initialize waitqueue used to wait for free SBALs in requests queue */
1985 init_waitqueue_head(&adapter->request_wq);
1986 753
1987 /* ok, we did it - skip all cleanups for different failures */ 754 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
1988 atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 755 zfcp_erp_adapter_strategy_generic(act, 1); /* close */
1989 retval = ZFCP_ERP_SUCCEEDED; 756 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
1990 goto out; 757 if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
758 return ZFCP_ERP_EXIT;
1991 759
1992 failed_do_qdio: 760 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
1993 /* NOP */ 761 retval = zfcp_erp_adapter_strategy_generic(act, 0); /* open */
762 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
1994 763
1995 failed_qdio_activate: 764 if (retval == ZFCP_ERP_FAILED)
1996 while (qdio_shutdown(adapter->ccw_device, 765 ssleep(8);
1997 QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
1998 ssleep(1);
1999
2000 failed_qdio_establish:
2001 failed_sanity:
2002 retval = ZFCP_ERP_FAILED;
2003 766
2004 out:
2005 return retval; 767 return retval;
2006} 768}
2007 769
2008 770static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
2009static int
2010zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
2011{ 771{
2012 int retval; 772 int retval;
2013 773
2014 retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); 774 retval = zfcp_fsf_close_physical_port(act);
2015 if (retval == ZFCP_ERP_FAILED) 775 if (retval == -ENOMEM)
776 return ZFCP_ERP_NOMEM;
777 act->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING;
778 if (retval)
2016 return ZFCP_ERP_FAILED; 779 return ZFCP_ERP_FAILED;
2017 780
2018 retval = zfcp_erp_adapter_strategy_open_fsf_xport(erp_action); 781 return ZFCP_ERP_CONTINUES;
2019 if (retval == ZFCP_ERP_FAILED)
2020 return ZFCP_ERP_FAILED;
2021
2022 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
2023} 782}
2024 783
2025static int 784static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
2026zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2027{ 785{
2028 int retval = ZFCP_ERP_SUCCEEDED; 786 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
2029 int retries; 787 ZFCP_STATUS_COMMON_CLOSING |
2030 int sleep = ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP; 788 ZFCP_STATUS_COMMON_ACCESS_DENIED |
2031 struct zfcp_adapter *adapter = erp_action->adapter; 789 ZFCP_STATUS_PORT_DID_DID |
2032 790 ZFCP_STATUS_PORT_PHYS_CLOSING |
2033 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status); 791 ZFCP_STATUS_PORT_INVALID_WWPN,
2034 792 &port->status);
2035 for (retries = ZFCP_EXCHANGE_CONFIG_DATA_RETRIES; retries; retries--) { 793}
2036 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
2037 &adapter->status);
2038 ZFCP_LOG_DEBUG("Doing exchange config data\n");
2039 write_lock_irq(&adapter->erp_lock);
2040 zfcp_erp_action_to_running(erp_action);
2041 write_unlock_irq(&adapter->erp_lock);
2042 if (zfcp_fsf_exchange_config_data(erp_action)) {
2043 retval = ZFCP_ERP_FAILED;
2044 ZFCP_LOG_INFO("error: initiation of exchange of "
2045 "configuration data failed for "
2046 "adapter %s\n",
2047 zfcp_get_busid_by_adapter(adapter));
2048 break;
2049 }
2050 ZFCP_LOG_DEBUG("Xchange underway\n");
2051
2052 /*
2053 * Why this works:
2054 * Both the normal completion handler as well as the timeout
2055 * handler will do an 'up' when the 'exchange config data'
2056 * request completes or times out. Thus, the signal to go on
2057 * won't be lost utilizing this semaphore.
2058 * Furthermore, this 'adapter_reopen' action is
2059 * guaranteed to be the only action being there (highest action
2060 * which prevents other actions from being created).
2061 * Resulting from that, the wake signal recognized here
2062 * _must_ be the one belonging to the 'exchange config
2063 * data' request.
2064 */
2065 zfcp_rec_dbf_event_thread(6, adapter, 1);
2066 down(&adapter->erp_ready_sem);
2067 zfcp_rec_dbf_event_thread(7, adapter, 1);
2068 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
2069 ZFCP_LOG_INFO("error: exchange of configuration data "
2070 "for adapter %s timed out\n",
2071 zfcp_get_busid_by_adapter(adapter));
2072 break;
2073 }
2074
2075 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
2076 &adapter->status))
2077 break;
2078 794
2079 ZFCP_LOG_DEBUG("host connection still initialising... " 795static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
2080 "waiting and retrying...\n"); 796{
2081 /* sleep a little bit before retry */ 797 struct zfcp_port *port = erp_action->port;
2082 ssleep(sleep); 798 int status = atomic_read(&port->status);
2083 sleep *= 2;
2084 }
2085 799
2086 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 800 switch (erp_action->step) {
2087 &adapter->status); 801 case ZFCP_ERP_STEP_UNINITIALIZED:
802 zfcp_erp_port_strategy_clearstati(port);
803 if ((status & ZFCP_STATUS_PORT_PHYS_OPEN) &&
804 (status & ZFCP_STATUS_COMMON_OPEN))
805 return zfcp_erp_port_forced_strategy_close(erp_action);
806 else
807 return ZFCP_ERP_FAILED;
2088 808
2089 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 809 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
2090 &adapter->status)) { 810 if (status & ZFCP_STATUS_PORT_PHYS_OPEN)
2091 ZFCP_LOG_INFO("error: exchange of configuration data for " 811 return ZFCP_ERP_SUCCEEDED;
2092 "adapter %s failed\n",
2093 zfcp_get_busid_by_adapter(adapter));
2094 retval = ZFCP_ERP_FAILED;
2095 } 812 }
2096 813 return ZFCP_ERP_FAILED;
2097 return retval;
2098} 814}
2099 815
2100static int 816static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
2101zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2102{ 817{
2103 int ret; 818 int retval;
2104 struct zfcp_adapter *adapter;
2105
2106 adapter = erp_action->adapter;
2107 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2108
2109 write_lock_irq(&adapter->erp_lock);
2110 zfcp_erp_action_to_running(erp_action);
2111 write_unlock_irq(&adapter->erp_lock);
2112 819
2113 ret = zfcp_fsf_exchange_port_data(erp_action); 820 retval = zfcp_fsf_close_port(erp_action);
2114 if (ret == -EOPNOTSUPP) { 821 if (retval == -ENOMEM)
2115 return ZFCP_ERP_SUCCEEDED; 822 return ZFCP_ERP_NOMEM;
2116 } else if (ret) { 823 erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING;
824 if (retval)
2117 return ZFCP_ERP_FAILED; 825 return ZFCP_ERP_FAILED;
2118 } 826 return ZFCP_ERP_CONTINUES;
2119
2120 ret = ZFCP_ERP_SUCCEEDED;
2121 zfcp_rec_dbf_event_thread(8, adapter, 1);
2122 down(&adapter->erp_ready_sem);
2123 zfcp_rec_dbf_event_thread(9, adapter, 1);
2124 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
2125 ZFCP_LOG_INFO("error: exchange port data timed out (adapter "
2126 "%s)\n", zfcp_get_busid_by_adapter(adapter));
2127 ret = ZFCP_ERP_FAILED;
2128 }
2129
2130 /* don't treat as error for the sake of compatibility */
2131 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status))
2132 ZFCP_LOG_INFO("warning: exchange port data failed (adapter "
2133 "%s\n", zfcp_get_busid_by_adapter(adapter));
2134
2135 return ret;
2136} 827}
2137 828
2138static int 829static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
2139zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
2140 *erp_action)
2141{ 830{
2142 int retval = ZFCP_ERP_SUCCEEDED; 831 int retval;
2143 int temp_ret;
2144 struct zfcp_adapter *adapter = erp_action->adapter;
2145 int i;
2146
2147 adapter->status_read_failed = 0;
2148 for (i = 0; i < ZFCP_STATUS_READS_RECOM; i++) {
2149 temp_ret = zfcp_fsf_status_read(adapter, ZFCP_WAIT_FOR_SBAL);
2150 if (temp_ret < 0) {
2151 ZFCP_LOG_INFO("error: set-up of unsolicited status "
2152 "notification failed on adapter %s\n",
2153 zfcp_get_busid_by_adapter(adapter));
2154 retval = ZFCP_ERP_FAILED;
2155 i--;
2156 break;
2157 }
2158 }
2159 832
2160 return retval; 833 retval = zfcp_fsf_open_port(erp_action);
834 if (retval == -ENOMEM)
835 return ZFCP_ERP_NOMEM;
836 erp_action->step = ZFCP_ERP_STEP_PORT_OPENING;
837 if (retval)
838 return ZFCP_ERP_FAILED;
839 return ZFCP_ERP_CONTINUES;
2161} 840}
2162 841
2163/* 842static void zfcp_erp_port_strategy_open_ns_wake(struct zfcp_erp_action *ns_act)
2164 * function:
2165 *
2166 * purpose: this routine executes the 'Reopen Physical Port' action
2167 *
2168 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2169 * ZFCP_ERP_SUCCEEDED - action finished successfully
2170 * ZFCP_ERP_FAILED - action finished unsuccessfully
2171 */
2172static int
2173zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
2174{ 843{
2175 int retval = ZFCP_ERP_FAILED; 844 unsigned long flags;
2176 struct zfcp_port *port = erp_action->port; 845 struct zfcp_adapter *adapter = ns_act->adapter;
2177 846 struct zfcp_erp_action *act, *tmp;
2178 switch (erp_action->step) { 847 int status;
2179
2180 /*
2181 * FIXME:
2182 * the ULP spec. begs for waiting for oustanding commands
2183 */
2184 case ZFCP_ERP_STEP_UNINITIALIZED:
2185 zfcp_erp_port_strategy_clearstati(port);
2186 /*
2187 * it would be sufficient to test only the normal open flag
2188 * since the phys. open flag cannot be set if the normal
2189 * open flag is unset - however, this is for readabilty ...
2190 */
2191 if (atomic_test_mask((ZFCP_STATUS_PORT_PHYS_OPEN |
2192 ZFCP_STATUS_COMMON_OPEN),
2193 &port->status)) {
2194 ZFCP_LOG_DEBUG("port 0x%016Lx is open -> trying "
2195 "close physical\n", port->wwpn);
2196 retval =
2197 zfcp_erp_port_forced_strategy_close(erp_action);
2198 } else
2199 retval = ZFCP_ERP_FAILED;
2200 break;
2201 848
2202 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: 849 read_lock_irqsave(&adapter->erp_lock, flags);
2203 if (atomic_test_mask(ZFCP_STATUS_PORT_PHYS_OPEN, 850 list_for_each_entry_safe(act, tmp, &adapter->erp_running_head, list) {
2204 &port->status)) { 851 if (act->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
2205 ZFCP_LOG_DEBUG("close physical failed for port " 852 status = atomic_read(&adapter->nameserver_port->status);
2206 "0x%016Lx\n", port->wwpn); 853 if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
2207 retval = ZFCP_ERP_FAILED; 854 zfcp_erp_port_failed(act->port, 27, NULL);
2208 } else 855 zfcp_erp_action_ready(act);
2209 retval = ZFCP_ERP_SUCCEEDED; 856 }
2210 break;
2211 } 857 }
2212 858 read_unlock_irqrestore(&adapter->erp_lock, flags);
2213 return retval;
2214} 859}
2215 860
2216/* 861static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *act)
2217 * function:
2218 *
2219 * purpose: this routine executes the 'Reopen Port' action
2220 *
2221 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2222 * ZFCP_ERP_SUCCEEDED - action finished successfully
2223 * ZFCP_ERP_FAILED - action finished unsuccessfully
2224 */
2225static int
2226zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
2227{ 862{
2228 int retval = ZFCP_ERP_FAILED; 863 int retval;
2229 struct zfcp_port *port = erp_action->port;
2230
2231 switch (erp_action->step) {
2232 864
2233 /* 865 switch (act->step) {
2234 * FIXME:
2235 * the ULP spec. begs for waiting for oustanding commands
2236 */
2237 case ZFCP_ERP_STEP_UNINITIALIZED: 866 case ZFCP_ERP_STEP_UNINITIALIZED:
2238 zfcp_erp_port_strategy_clearstati(port); 867 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
2239 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
2240 ZFCP_LOG_DEBUG("port 0x%016Lx is open -> trying "
2241 "close\n", port->wwpn);
2242 retval = zfcp_erp_port_strategy_close(erp_action);
2243 goto out;
2244 } /* else it's already closed, open it */
2245 break;
2246
2247 case ZFCP_ERP_STEP_PORT_CLOSING: 868 case ZFCP_ERP_STEP_PORT_CLOSING:
2248 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) { 869 return zfcp_erp_port_strategy_open_port(act);
2249 ZFCP_LOG_DEBUG("close failed for port 0x%016Lx\n", 870
2250 port->wwpn); 871 case ZFCP_ERP_STEP_PORT_OPENING:
872 if (atomic_read(&act->port->status) & ZFCP_STATUS_COMMON_OPEN)
873 retval = ZFCP_ERP_SUCCEEDED;
874 else
2251 retval = ZFCP_ERP_FAILED; 875 retval = ZFCP_ERP_FAILED;
2252 goto out; 876 /* this is needed anyway */
2253 } /* else it's closed now, open it */ 877 zfcp_erp_port_strategy_open_ns_wake(act);
2254 break; 878 return retval;
2255 }
2256 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
2257 retval = ZFCP_ERP_EXIT;
2258 else
2259 retval = zfcp_erp_port_strategy_open(erp_action);
2260 879
2261 out: 880 default:
2262 return retval; 881 return ZFCP_ERP_FAILED;
882 }
2263} 883}
2264 884
2265static int 885static int zfcp_erp_port_strategy_open_lookup(struct zfcp_erp_action *act)
2266zfcp_erp_port_strategy_open(struct zfcp_erp_action *erp_action)
2267{ 886{
2268 int retval; 887 int retval;
2269 888
2270 if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, 889 retval = zfcp_fc_ns_gid_pn_request(act);
2271 &erp_action->port->status)) 890 if (retval == -ENOMEM)
2272 retval = zfcp_erp_port_strategy_open_nameserver(erp_action); 891 return ZFCP_ERP_NOMEM;
2273 else 892 act->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
2274 retval = zfcp_erp_port_strategy_open_common(erp_action); 893 if (retval)
2275 894 return ZFCP_ERP_FAILED;
2276 return retval; 895 return ZFCP_ERP_CONTINUES;
2277} 896}
2278 897
2279static int 898static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
2280zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *erp_action)
2281{ 899{
2282 int retval = 0; 900 struct zfcp_adapter *adapter = act->adapter;
2283 struct zfcp_adapter *adapter = erp_action->adapter; 901 struct zfcp_port *port = act->port;
2284 struct zfcp_port *port = erp_action->port;
2285 902
2286 switch (erp_action->step) { 903 if (port->wwpn != adapter->peer_wwpn) {
904 dev_err(&adapter->ccw_device->dev,
905 "Failed to open port 0x%016Lx, "
906 "Peer WWPN 0x%016Lx does not "
907 "match.\n", port->wwpn,
908 adapter->peer_wwpn);
909 zfcp_erp_port_failed(port, 25, NULL);
910 return ZFCP_ERP_FAILED;
911 }
912 port->d_id = adapter->peer_d_id;
913 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
914 return zfcp_erp_port_strategy_open_port(act);
915}
916
917static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
918{
919 struct zfcp_adapter *adapter = act->adapter;
920 struct zfcp_port *port = act->port;
921 struct zfcp_port *ns_port = adapter->nameserver_port;
922 int p_status = atomic_read(&port->status);
2287 923
924 switch (act->step) {
2288 case ZFCP_ERP_STEP_UNINITIALIZED: 925 case ZFCP_ERP_STEP_UNINITIALIZED:
2289 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: 926 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
2290 case ZFCP_ERP_STEP_PORT_CLOSING: 927 case ZFCP_ERP_STEP_PORT_CLOSING:
2291 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP) { 928 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
2292 if (port->wwpn != adapter->peer_wwpn) { 929 return zfcp_erp_open_ptp_port(act);
2293 ZFCP_LOG_NORMAL("Failed to open port 0x%016Lx " 930 if (!ns_port) {
2294 "on adapter %s.\nPeer WWPN " 931 dev_err(&adapter->ccw_device->dev,
2295 "0x%016Lx does not match\n", 932 "Nameserver port unavailable.\n");
2296 port->wwpn, 933 return ZFCP_ERP_FAILED;
2297 zfcp_get_busid_by_adapter(adapter),
2298 adapter->peer_wwpn);
2299 zfcp_erp_port_failed(port, 25, NULL);
2300 retval = ZFCP_ERP_FAILED;
2301 break;
2302 }
2303 port->d_id = adapter->peer_d_id;
2304 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
2305 retval = zfcp_erp_port_strategy_open_port(erp_action);
2306 break;
2307 } 934 }
2308 if (!(adapter->nameserver_port)) { 935 if (!(atomic_read(&ns_port->status) &
2309 retval = zfcp_nameserver_enqueue(adapter); 936 ZFCP_STATUS_COMMON_UNBLOCKED)) {
2310 if (retval != 0) {
2311 ZFCP_LOG_NORMAL("error: nameserver port "
2312 "unavailable for adapter %s\n",
2313 zfcp_get_busid_by_adapter(adapter));
2314 retval = ZFCP_ERP_FAILED;
2315 break;
2316 }
2317 }
2318 if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
2319 &adapter->nameserver_port->status)) {
2320 ZFCP_LOG_DEBUG("nameserver port is not open -> open "
2321 "nameserver port\n");
2322 /* nameserver port may live again */ 937 /* nameserver port may live again */
2323 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, 938 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING,
2324 &adapter->nameserver_port->status); 939 &ns_port->status);
2325 if (zfcp_erp_port_reopen(adapter->nameserver_port, 0, 940 if (zfcp_erp_port_reopen(ns_port, 0, 77, act) >= 0) {
2326 77, erp_action) >= 0) { 941 act->step = ZFCP_ERP_STEP_NAMESERVER_OPEN;
2327 erp_action->step = 942 return ZFCP_ERP_CONTINUES;
2328 ZFCP_ERP_STEP_NAMESERVER_OPEN; 943 }
2329 retval = ZFCP_ERP_CONTINUES; 944 return ZFCP_ERP_FAILED;
2330 } else
2331 retval = ZFCP_ERP_FAILED;
2332 break;
2333 } 945 }
2334 /* else nameserver port is already open, fall through */ 946 /* else nameserver port is already open, fall through */
2335 case ZFCP_ERP_STEP_NAMESERVER_OPEN: 947 case ZFCP_ERP_STEP_NAMESERVER_OPEN:
2336 if (!atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, 948 if (!(atomic_read(&ns_port->status) & ZFCP_STATUS_COMMON_OPEN))
2337 &adapter->nameserver_port->status)) { 949 return ZFCP_ERP_FAILED;
2338 ZFCP_LOG_DEBUG("open failed for nameserver port\n"); 950 return zfcp_erp_port_strategy_open_lookup(act);
2339 retval = ZFCP_ERP_FAILED;
2340 } else {
2341 ZFCP_LOG_DEBUG("nameserver port is open -> "
2342 "nameserver look-up for port 0x%016Lx\n",
2343 port->wwpn);
2344 retval = zfcp_erp_port_strategy_open_common_lookup
2345 (erp_action);
2346 }
2347 break;
2348 951
2349 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: 952 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
2350 if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status)) { 953 if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
2351 if (atomic_test_mask 954 if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) {
2352 (ZFCP_STATUS_PORT_INVALID_WWPN, &port->status)) {
2353 ZFCP_LOG_DEBUG("nameserver look-up failed "
2354 "for port 0x%016Lx "
2355 "(misconfigured WWPN?)\n",
2356 port->wwpn);
2357 zfcp_erp_port_failed(port, 26, NULL); 955 zfcp_erp_port_failed(port, 26, NULL);
2358 retval = ZFCP_ERP_EXIT; 956 return ZFCP_ERP_EXIT;
2359 } else {
2360 ZFCP_LOG_DEBUG("nameserver look-up failed for "
2361 "port 0x%016Lx\n", port->wwpn);
2362 retval = ZFCP_ERP_FAILED;
2363 } 957 }
2364 } else { 958 return ZFCP_ERP_FAILED;
2365 ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%06x -> "
2366 "trying open\n", port->wwpn, port->d_id);
2367 retval = zfcp_erp_port_strategy_open_port(erp_action);
2368 } 959 }
2369 break; 960 return zfcp_erp_port_strategy_open_port(act);
2370 961
2371 case ZFCP_ERP_STEP_PORT_OPENING: 962 case ZFCP_ERP_STEP_PORT_OPENING:
2372 /* D_ID might have changed during open */ 963 /* D_ID might have changed during open */
2373 if (atomic_test_mask((ZFCP_STATUS_COMMON_OPEN | 964 if ((p_status & ZFCP_STATUS_COMMON_OPEN) &&
2374 ZFCP_STATUS_PORT_DID_DID), 965 (p_status & ZFCP_STATUS_PORT_DID_DID))
2375 &port->status)) { 966 return ZFCP_ERP_SUCCEEDED;
2376 ZFCP_LOG_DEBUG("port 0x%016Lx is open\n", port->wwpn); 967 /* fall through otherwise */
2377 retval = ZFCP_ERP_SUCCEEDED;
2378 } else {
2379 ZFCP_LOG_DEBUG("open failed for port 0x%016Lx\n",
2380 port->wwpn);
2381 retval = ZFCP_ERP_FAILED;
2382 }
2383 break;
2384
2385 default:
2386 ZFCP_LOG_NORMAL("bug: unknown erp step 0x%08x\n",
2387 erp_action->step);
2388 retval = ZFCP_ERP_FAILED;
2389 } 968 }
969 return ZFCP_ERP_FAILED;
970}
2390 971
2391 return retval; 972static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *act)
973{
974 if (atomic_read(&act->port->status) & (ZFCP_STATUS_PORT_WKA))
975 return zfcp_erp_port_strategy_open_nameserver(act);
976 return zfcp_erp_port_strategy_open_common(act);
2392} 977}
2393 978
2394static int 979static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
2395zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *erp_action)
2396{ 980{
2397 int retval;
2398 struct zfcp_port *port = erp_action->port; 981 struct zfcp_port *port = erp_action->port;
2399 982
2400 switch (erp_action->step) { 983 switch (erp_action->step) {
2401
2402 case ZFCP_ERP_STEP_UNINITIALIZED: 984 case ZFCP_ERP_STEP_UNINITIALIZED:
2403 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: 985 zfcp_erp_port_strategy_clearstati(port);
2404 case ZFCP_ERP_STEP_PORT_CLOSING: 986 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)
2405 ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%06x -> trying open\n", 987 return zfcp_erp_port_strategy_close(erp_action);
2406 port->wwpn, port->d_id);
2407 retval = zfcp_erp_port_strategy_open_port(erp_action);
2408 break; 988 break;
2409 989
2410 case ZFCP_ERP_STEP_PORT_OPENING: 990 case ZFCP_ERP_STEP_PORT_CLOSING:
2411 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) { 991 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)
2412 ZFCP_LOG_DEBUG("WKA port is open\n"); 992 return ZFCP_ERP_FAILED;
2413 retval = ZFCP_ERP_SUCCEEDED;
2414 } else {
2415 ZFCP_LOG_DEBUG("open failed for WKA port\n");
2416 retval = ZFCP_ERP_FAILED;
2417 }
2418 /* this is needed anyway (dont care for retval of wakeup) */
2419 ZFCP_LOG_DEBUG("continue other open port operations\n");
2420 zfcp_erp_port_strategy_open_nameserver_wakeup(erp_action);
2421 break; 993 break;
2422
2423 default:
2424 ZFCP_LOG_NORMAL("bug: unknown erp step 0x%08x\n",
2425 erp_action->step);
2426 retval = ZFCP_ERP_FAILED;
2427 } 994 }
995 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
996 return ZFCP_ERP_EXIT;
997 else
998 return zfcp_erp_port_strategy_open(erp_action);
2428 999
2429 return retval; 1000 return ZFCP_ERP_FAILED;
2430}
2431
2432/*
2433 * function:
2434 *
2435 * purpose: makes the erp thread continue with reopen (physical) port
2436 * actions which have been paused until the name server port
2437 * is opened (or failed)
2438 *
2439 * returns: 0 (a kind of void retval, its not used)
2440 */
2441static int
2442zfcp_erp_port_strategy_open_nameserver_wakeup(struct zfcp_erp_action
2443 *ns_erp_action)
2444{
2445 int retval = 0;
2446 unsigned long flags;
2447 struct zfcp_adapter *adapter = ns_erp_action->adapter;
2448 struct zfcp_erp_action *erp_action, *tmp;
2449
2450 read_lock_irqsave(&adapter->erp_lock, flags);
2451 list_for_each_entry_safe(erp_action, tmp, &adapter->erp_running_head,
2452 list) {
2453 if (erp_action->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
2454 if (atomic_test_mask(
2455 ZFCP_STATUS_COMMON_ERP_FAILED,
2456 &adapter->nameserver_port->status))
2457 zfcp_erp_port_failed(erp_action->port, 27,
2458 NULL);
2459 zfcp_erp_action_ready(erp_action);
2460 }
2461 }
2462 read_unlock_irqrestore(&adapter->erp_lock, flags);
2463
2464 return retval;
2465}
2466
2467/*
2468 * function:
2469 *
2470 * purpose:
2471 *
2472 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2473 * ZFCP_ERP_FAILED - action finished unsuccessfully
2474 */
2475static int
2476zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *erp_action)
2477{
2478 int retval;
2479
2480 retval = zfcp_fsf_close_physical_port(erp_action);
2481 if (retval == -ENOMEM) {
2482 retval = ZFCP_ERP_NOMEM;
2483 goto out;
2484 }
2485 erp_action->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING;
2486 if (retval != 0) {
2487 /* could not send 'open', fail */
2488 retval = ZFCP_ERP_FAILED;
2489 goto out;
2490 }
2491 retval = ZFCP_ERP_CONTINUES;
2492 out:
2493 return retval;
2494} 1001}
2495 1002
2496static int 1003static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
2497zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
2498{ 1004{
2499 int retval = 0;
2500
2501 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | 1005 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
2502 ZFCP_STATUS_COMMON_CLOSING | 1006 ZFCP_STATUS_COMMON_CLOSING |
2503 ZFCP_STATUS_COMMON_ACCESS_DENIED | 1007 ZFCP_STATUS_COMMON_ACCESS_DENIED |
2504 ZFCP_STATUS_PORT_DID_DID | 1008 ZFCP_STATUS_UNIT_SHARED |
2505 ZFCP_STATUS_PORT_PHYS_CLOSING | 1009 ZFCP_STATUS_UNIT_READONLY,
2506 ZFCP_STATUS_PORT_INVALID_WWPN, 1010 &unit->status);
2507 &port->status);
2508 return retval;
2509}
2510
2511/*
2512 * function:
2513 *
2514 * purpose:
2515 *
2516 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2517 * ZFCP_ERP_FAILED - action finished unsuccessfully
2518 */
2519static int
2520zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
2521{
2522 int retval;
2523
2524 retval = zfcp_fsf_close_port(erp_action);
2525 if (retval == -ENOMEM) {
2526 retval = ZFCP_ERP_NOMEM;
2527 goto out;
2528 }
2529 erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING;
2530 if (retval != 0) {
2531 /* could not send 'close', fail */
2532 retval = ZFCP_ERP_FAILED;
2533 goto out;
2534 }
2535 retval = ZFCP_ERP_CONTINUES;
2536 out:
2537 return retval;
2538} 1011}
2539 1012
2540/* 1013static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action)
2541 * function:
2542 *
2543 * purpose:
2544 *
2545 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2546 * ZFCP_ERP_FAILED - action finished unsuccessfully
2547 */
2548static int
2549zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
2550{ 1014{
2551 int retval; 1015 int retval = zfcp_fsf_close_unit(erp_action);
2552 1016 if (retval == -ENOMEM)
2553 retval = zfcp_fsf_open_port(erp_action); 1017 return ZFCP_ERP_NOMEM;
2554 if (retval == -ENOMEM) { 1018 erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING;
2555 retval = ZFCP_ERP_NOMEM; 1019 if (retval)
2556 goto out; 1020 return ZFCP_ERP_FAILED;
2557 } 1021 return ZFCP_ERP_CONTINUES;
2558 erp_action->step = ZFCP_ERP_STEP_PORT_OPENING;
2559 if (retval != 0) {
2560 /* could not send 'open', fail */
2561 retval = ZFCP_ERP_FAILED;
2562 goto out;
2563 }
2564 retval = ZFCP_ERP_CONTINUES;
2565 out:
2566 return retval;
2567} 1022}
2568 1023
2569/* 1024static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
2570 * function:
2571 *
2572 * purpose:
2573 *
2574 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2575 * ZFCP_ERP_FAILED - action finished unsuccessfully
2576 */
2577static int
2578zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *erp_action)
2579{ 1025{
2580 int retval; 1026 int retval = zfcp_fsf_open_unit(erp_action);
2581 1027 if (retval == -ENOMEM)
2582 retval = zfcp_ns_gid_pn_request(erp_action); 1028 return ZFCP_ERP_NOMEM;
2583 if (retval == -ENOMEM) { 1029 erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING;
2584 retval = ZFCP_ERP_NOMEM; 1030 if (retval)
2585 goto out; 1031 return ZFCP_ERP_FAILED;
2586 } 1032 return ZFCP_ERP_CONTINUES;
2587 erp_action->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
2588 if (retval != 0) {
2589 /* could not send nameserver request, fail */
2590 retval = ZFCP_ERP_FAILED;
2591 goto out;
2592 }
2593 retval = ZFCP_ERP_CONTINUES;
2594 out:
2595 return retval;
2596} 1033}
2597 1034
2598/* 1035static int zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action)
2599 * function:
2600 *
2601 * purpose: this routine executes the 'Reopen Unit' action
2602 * currently no retries
2603 *
2604 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2605 * ZFCP_ERP_SUCCEEDED - action finished successfully
2606 * ZFCP_ERP_FAILED - action finished unsuccessfully
2607 */
2608static int
2609zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action)
2610{ 1036{
2611 int retval = ZFCP_ERP_FAILED;
2612 struct zfcp_unit *unit = erp_action->unit; 1037 struct zfcp_unit *unit = erp_action->unit;
2613 1038
2614 switch (erp_action->step) { 1039 switch (erp_action->step) {
2615
2616 /*
2617 * FIXME:
2618 * the ULP spec. begs for waiting for oustanding commands
2619 */
2620 case ZFCP_ERP_STEP_UNINITIALIZED: 1040 case ZFCP_ERP_STEP_UNINITIALIZED:
2621 zfcp_erp_unit_strategy_clearstati(unit); 1041 zfcp_erp_unit_strategy_clearstati(unit);
2622 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) { 1042 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
2623 ZFCP_LOG_DEBUG("unit 0x%016Lx is open -> " 1043 return zfcp_erp_unit_strategy_close(erp_action);
2624 "trying close\n", unit->fcp_lun); 1044 /* already closed, fall through */
2625 retval = zfcp_erp_unit_strategy_close(erp_action);
2626 break;
2627 }
2628 /* else it's already closed, fall through */
2629 case ZFCP_ERP_STEP_UNIT_CLOSING: 1045 case ZFCP_ERP_STEP_UNIT_CLOSING:
2630 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) { 1046 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
2631 ZFCP_LOG_DEBUG("close failed for unit 0x%016Lx\n", 1047 return ZFCP_ERP_FAILED;
2632 unit->fcp_lun); 1048 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
2633 retval = ZFCP_ERP_FAILED; 1049 return ZFCP_ERP_EXIT;
2634 } else { 1050 return zfcp_erp_unit_strategy_open(erp_action);
2635 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
2636 retval = ZFCP_ERP_EXIT;
2637 else {
2638 ZFCP_LOG_DEBUG("unit 0x%016Lx is not open -> "
2639 "trying open\n", unit->fcp_lun);
2640 retval =
2641 zfcp_erp_unit_strategy_open(erp_action);
2642 }
2643 }
2644 break;
2645 1051
2646 case ZFCP_ERP_STEP_UNIT_OPENING: 1052 case ZFCP_ERP_STEP_UNIT_OPENING:
2647 if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) { 1053 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
2648 ZFCP_LOG_DEBUG("unit 0x%016Lx is open\n", 1054 return ZFCP_ERP_SUCCEEDED;
2649 unit->fcp_lun);
2650 retval = ZFCP_ERP_SUCCEEDED;
2651 } else {
2652 ZFCP_LOG_DEBUG("open failed for unit 0x%016Lx\n",
2653 unit->fcp_lun);
2654 retval = ZFCP_ERP_FAILED;
2655 }
2656 break;
2657 } 1055 }
2658 1056 return ZFCP_ERP_FAILED;
2659 return retval;
2660} 1057}
2661 1058
2662static int 1059static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
2663zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
2664{ 1060{
2665 int retval = 0; 1061 switch (result) {
2666 1062 case ZFCP_ERP_SUCCEEDED :
2667 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | 1063 atomic_set(&unit->erp_counter, 0);
2668 ZFCP_STATUS_COMMON_CLOSING | 1064 zfcp_erp_unit_unblock(unit);
2669 ZFCP_STATUS_COMMON_ACCESS_DENIED | 1065 break;
2670 ZFCP_STATUS_UNIT_SHARED | 1066 case ZFCP_ERP_FAILED :
2671 ZFCP_STATUS_UNIT_READONLY, 1067 atomic_inc(&unit->erp_counter);
2672 &unit->status); 1068 if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS)
1069 zfcp_erp_unit_failed(unit, 21, NULL);
1070 break;
1071 }
2673 1072
2674 return retval; 1073 if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
1074 zfcp_erp_unit_block(unit, 0);
1075 result = ZFCP_ERP_EXIT;
1076 }
1077 return result;
2675} 1078}
2676 1079
2677/* 1080static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
2678 * function:
2679 *
2680 * purpose:
2681 *
2682 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2683 * ZFCP_ERP_FAILED - action finished unsuccessfully
2684 */
2685static int
2686zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action)
2687{ 1081{
2688 int retval; 1082 switch (result) {
1083 case ZFCP_ERP_SUCCEEDED :
1084 atomic_set(&port->erp_counter, 0);
1085 zfcp_erp_port_unblock(port);
1086 break;
2689 1087
2690 retval = zfcp_fsf_close_unit(erp_action); 1088 case ZFCP_ERP_FAILED :
2691 if (retval == -ENOMEM) { 1089 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC) {
2692 retval = ZFCP_ERP_NOMEM; 1090 zfcp_erp_port_block(port, 0);
2693 goto out; 1091 result = ZFCP_ERP_EXIT;
2694 } 1092 }
2695 erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING; 1093 atomic_inc(&port->erp_counter);
2696 if (retval != 0) { 1094 if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS)
2697 /* could not send 'close', fail */ 1095 zfcp_erp_port_failed(port, 22, NULL);
2698 retval = ZFCP_ERP_FAILED; 1096 break;
2699 goto out;
2700 } 1097 }
2701 retval = ZFCP_ERP_CONTINUES;
2702 1098
2703 out: 1099 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
2704 return retval; 1100 zfcp_erp_port_block(port, 0);
1101 result = ZFCP_ERP_EXIT;
1102 }
1103 return result;
2705} 1104}
2706 1105
2707/* 1106static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
2708 * function: 1107 int result)
2709 *
2710 * purpose:
2711 *
2712 * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
2713 * ZFCP_ERP_FAILED - action finished unsuccessfully
2714 */
2715static int
2716zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
2717{ 1108{
2718 int retval; 1109 switch (result) {
1110 case ZFCP_ERP_SUCCEEDED :
1111 atomic_set(&adapter->erp_counter, 0);
1112 zfcp_erp_adapter_unblock(adapter);
1113 break;
2719 1114
2720 retval = zfcp_fsf_open_unit(erp_action); 1115 case ZFCP_ERP_FAILED :
2721 if (retval == -ENOMEM) { 1116 atomic_inc(&adapter->erp_counter);
2722 retval = ZFCP_ERP_NOMEM; 1117 if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS)
2723 goto out; 1118 zfcp_erp_adapter_failed(adapter, 23, NULL);
2724 } 1119 break;
2725 erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING;
2726 if (retval != 0) {
2727 /* could not send 'open', fail */
2728 retval = ZFCP_ERP_FAILED;
2729 goto out;
2730 } 1120 }
2731 retval = ZFCP_ERP_CONTINUES;
2732 out:
2733 return retval;
2734}
2735 1121
2736void zfcp_erp_start_timer(struct zfcp_fsf_req *fsf_req) 1122 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
2737{ 1123 zfcp_erp_adapter_block(adapter, 0);
2738 BUG_ON(!fsf_req->erp_action); 1124 result = ZFCP_ERP_EXIT;
2739 fsf_req->timer.function = zfcp_erp_timeout_handler; 1125 }
2740 fsf_req->timer.data = (unsigned long) fsf_req->erp_action; 1126 return result;
2741 fsf_req->timer.expires = jiffies + ZFCP_ERP_FSFREQ_TIMEOUT;
2742 add_timer(&fsf_req->timer);
2743} 1127}
2744 1128
2745/* 1129static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
2746 * function: 1130 int result)
2747 *
2748 * purpose: enqueue the specified error recovery action, if needed
2749 *
2750 * returns:
2751 */
2752static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
2753 struct zfcp_port *port,
2754 struct zfcp_unit *unit, u8 id, void *ref)
2755{ 1131{
2756 int retval = 1, need = want; 1132 struct zfcp_adapter *adapter = erp_action->adapter;
2757 struct zfcp_erp_action *erp_action = NULL; 1133 struct zfcp_port *port = erp_action->port;
2758 u32 status = 0; 1134 struct zfcp_unit *unit = erp_action->unit;
2759 1135
2760 /* 1136 switch (erp_action->action) {
2761 * We need some rules here which check whether we really need
2762 * this action or whether we should just drop it.
2763 * E.g. if there is a unfinished 'Reopen Port' request then we drop a
2764 * 'Reopen Unit' request for an associated unit since we can't
2765 * satisfy this request now. A 'Reopen Port' action will trigger
2766 * 'Reopen Unit' actions when it completes.
2767 * Thus, there are only actions in the queue which can immediately be
2768 * executed. This makes the processing of the action queue more
2769 * efficient.
2770 */
2771
2772 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
2773 &adapter->status))
2774 return -EIO;
2775 1137
2776 /* check whether we really need this */
2777 switch (want) {
2778 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1138 case ZFCP_ERP_ACTION_REOPEN_UNIT:
2779 if (atomic_test_mask 1139 result = zfcp_erp_strategy_check_unit(unit, result);
2780 (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) { 1140 break;
2781 goto out;
2782 }
2783 if (!atomic_test_mask
2784 (ZFCP_STATUS_COMMON_RUNNING, &port->status) ||
2785 atomic_test_mask
2786 (ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
2787 goto out;
2788 }
2789 if (!atomic_test_mask
2790 (ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
2791 need = ZFCP_ERP_ACTION_REOPEN_PORT;
2792 /* fall through !!! */
2793
2794 case ZFCP_ERP_ACTION_REOPEN_PORT:
2795 if (atomic_test_mask
2796 (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) {
2797 goto out;
2798 }
2799 /* fall through !!! */
2800 1141
2801 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1142 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
2802 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 1143 case ZFCP_ERP_ACTION_REOPEN_PORT:
2803 &port->status)) { 1144 result = zfcp_erp_strategy_check_port(port, result);
2804 if (port->erp_action.action != 1145 break;
2805 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) {
2806 ZFCP_LOG_INFO("dropped erp action %i (port "
2807 "0x%016Lx, action in use: %i)\n",
2808 want, port->wwpn,
2809 port->erp_action.action);
2810 }
2811 goto out;
2812 }
2813 if (!atomic_test_mask
2814 (ZFCP_STATUS_COMMON_RUNNING, &adapter->status) ||
2815 atomic_test_mask
2816 (ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
2817 goto out;
2818 }
2819 if (!atomic_test_mask
2820 (ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
2821 need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
2822 /* fall through !!! */
2823 1146
2824 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1147 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
2825 if (atomic_test_mask 1148 result = zfcp_erp_strategy_check_adapter(adapter, result);
2826 (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) {
2827 goto out;
2828 }
2829 break; 1149 break;
2830
2831 default:
2832 ZFCP_LOG_NORMAL("bug: unknown erp action requested "
2833 "on adapter %s (action=%d)\n",
2834 zfcp_get_busid_by_adapter(adapter), want);
2835 goto out;
2836 } 1150 }
1151 return result;
1152}
2837 1153
2838 /* check whether we need something stronger first */ 1154static int zfcp_erp_strat_change_det(atomic_t *target_status, u32 erp_status)
2839 if (need) { 1155{
2840 ZFCP_LOG_DEBUG("stronger erp action %d needed before " 1156 int status = atomic_read(target_status);
2841 "erp action %d on adapter %s\n",
2842 need, want, zfcp_get_busid_by_adapter(adapter));
2843 }
2844 1157
2845 /* mark adapter to have some error recovery pending */ 1158 if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
2846 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); 1159 (erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY))
1160 return 1; /* take it online */
2847 1161
2848 /* setup error recovery action */ 1162 if (!(status & ZFCP_STATUS_COMMON_RUNNING) &&
2849 switch (need) { 1163 !(erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY))
1164 return 1; /* take it offline */
2850 1165
2851 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1166 return 0;
2852 zfcp_unit_get(unit); 1167}
2853 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); 1168
2854 erp_action = &unit->erp_action; 1169static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
2855 if (!atomic_test_mask 1170{
2856 (ZFCP_STATUS_COMMON_RUNNING, &unit->status)) 1171 int action = act->action;
2857 status = ZFCP_STATUS_ERP_CLOSE_ONLY; 1172 struct zfcp_adapter *adapter = act->adapter;
1173 struct zfcp_port *port = act->port;
1174 struct zfcp_unit *unit = act->unit;
1175 u32 erp_status = act->status;
1176
1177 switch (action) {
1178 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1179 if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
1180 _zfcp_erp_adapter_reopen(adapter,
1181 ZFCP_STATUS_COMMON_ERP_FAILED,
1182 67, NULL);
1183 return ZFCP_ERP_EXIT;
1184 }
2858 break; 1185 break;
2859 1186
2860 case ZFCP_ERP_ACTION_REOPEN_PORT:
2861 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1187 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
2862 zfcp_port_get(port); 1188 case ZFCP_ERP_ACTION_REOPEN_PORT:
2863 zfcp_erp_action_dismiss_port(port); 1189 if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
2864 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); 1190 _zfcp_erp_port_reopen(port,
2865 erp_action = &port->erp_action; 1191 ZFCP_STATUS_COMMON_ERP_FAILED,
2866 if (!atomic_test_mask 1192 68, NULL);
2867 (ZFCP_STATUS_COMMON_RUNNING, &port->status)) 1193 return ZFCP_ERP_EXIT;
2868 status = ZFCP_STATUS_ERP_CLOSE_ONLY; 1194 }
2869 break; 1195 break;
2870 1196
2871 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1197 case ZFCP_ERP_ACTION_REOPEN_UNIT:
2872 zfcp_adapter_get(adapter); 1198 if (zfcp_erp_strat_change_det(&unit->status, erp_status)) {
2873 zfcp_erp_action_dismiss_adapter(adapter); 1199 _zfcp_erp_unit_reopen(unit,
2874 atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); 1200 ZFCP_STATUS_COMMON_ERP_FAILED,
2875 erp_action = &adapter->erp_action; 1201 69, NULL);
2876 if (!atomic_test_mask 1202 return ZFCP_ERP_EXIT;
2877 (ZFCP_STATUS_COMMON_RUNNING, &adapter->status)) 1203 }
2878 status = ZFCP_STATUS_ERP_CLOSE_ONLY;
2879 break; 1204 break;
2880 } 1205 }
2881 1206 return ret;
2882 memset(erp_action, 0, sizeof (struct zfcp_erp_action));
2883 erp_action->adapter = adapter;
2884 erp_action->port = port;
2885 erp_action->unit = unit;
2886 erp_action->action = need;
2887 erp_action->status = status;
2888
2889 ++adapter->erp_total_count;
2890
2891 /* finally put it into 'ready' queue and kick erp thread */
2892 list_add_tail(&erp_action->list, &adapter->erp_ready_head);
2893 up(&adapter->erp_ready_sem);
2894 zfcp_rec_dbf_event_thread(1, adapter, 0);
2895 retval = 0;
2896 out:
2897 zfcp_rec_dbf_event_trigger(id, ref, want, need, erp_action,
2898 adapter, port, unit);
2899 return retval;
2900} 1207}
2901 1208
2902static int 1209static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
2903zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
2904{ 1210{
2905 int retval = 0;
2906 struct zfcp_adapter *adapter = erp_action->adapter; 1211 struct zfcp_adapter *adapter = erp_action->adapter;
2907 1212
2908 --adapter->erp_total_count; 1213 adapter->erp_total_count--;
2909 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) { 1214 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
2910 --adapter->erp_low_mem_count; 1215 adapter->erp_low_mem_count--;
2911 erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM; 1216 erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
2912 } 1217 }
2913 1218
@@ -2919,141 +1224,458 @@ zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
2919 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 1224 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
2920 &erp_action->unit->status); 1225 &erp_action->unit->status);
2921 break; 1226 break;
1227
2922 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1228 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
2923 case ZFCP_ERP_ACTION_REOPEN_PORT: 1229 case ZFCP_ERP_ACTION_REOPEN_PORT:
2924 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 1230 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
2925 &erp_action->port->status); 1231 &erp_action->port->status);
2926 break; 1232 break;
1233
2927 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1234 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
2928 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 1235 atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
2929 &erp_action->adapter->status); 1236 &erp_action->adapter->status);
2930 break; 1237 break;
2931 default:
2932 /* bug */
2933 break;
2934 } 1238 }
2935 return retval;
2936} 1239}
2937 1240
2938/** 1241struct zfcp_erp_add_work {
2939 * zfcp_erp_action_cleanup 1242 struct zfcp_unit *unit;
2940 * 1243 struct work_struct work;
2941 * Register unit with scsi stack if appropriate and fix reference counts. 1244};
2942 * Note: Temporary units are not registered with scsi stack. 1245
2943 */ 1246static void zfcp_erp_scsi_scan(struct work_struct *work)
2944static void
2945zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
2946 struct zfcp_port *port, struct zfcp_unit *unit,
2947 int result)
2948{ 1247{
2949 switch (action) { 1248 struct zfcp_erp_add_work *p =
1249 container_of(work, struct zfcp_erp_add_work, work);
1250 struct zfcp_unit *unit = p->unit;
1251 struct fc_rport *rport = unit->port->rport;
1252 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
1253 unit->scsi_lun, 0);
1254 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1255 zfcp_unit_put(unit);
1256 kfree(p);
1257}
1258
1259static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
1260{
1261 struct zfcp_erp_add_work *p;
1262
1263 p = kzalloc(sizeof(*p), GFP_KERNEL);
1264 if (!p) {
1265 dev_err(&unit->port->adapter->ccw_device->dev,
1266 "Out of resources. Could not register unit "
1267 "0x%016Lx on port 0x%016Lx with SCSI stack.\n",
1268 unit->fcp_lun, unit->port->wwpn);
1269 return;
1270 }
1271
1272 zfcp_unit_get(unit);
1273 atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1274 INIT_WORK(&p->work, zfcp_erp_scsi_scan);
1275 p->unit = unit;
1276 schedule_work(&p->work);
1277}
1278
1279static void zfcp_erp_rport_register(struct zfcp_port *port)
1280{
1281 struct fc_rport_identifiers ids;
1282 ids.node_name = port->wwnn;
1283 ids.port_name = port->wwpn;
1284 ids.port_id = port->d_id;
1285 ids.roles = FC_RPORT_ROLE_FCP_TARGET;
1286 port->rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
1287 if (!port->rport) {
1288 dev_err(&port->adapter->ccw_device->dev,
1289 "Failed registration of rport "
1290 "0x%016Lx.\n", port->wwpn);
1291 return;
1292 }
1293
1294 scsi_target_unblock(&port->rport->dev);
1295 port->rport->maxframe_size = port->maxframe_size;
1296 port->rport->supported_classes = port->supported_classes;
1297}
1298
1299static void zfcp_erp_rports_del(struct zfcp_adapter *adapter)
1300{
1301 struct zfcp_port *port;
1302 list_for_each_entry(port, &adapter->port_list_head, list)
1303 if (port->rport && !(atomic_read(&port->status) &
1304 ZFCP_STATUS_PORT_WKA)) {
1305 fc_remote_port_delete(port->rport);
1306 port->rport = NULL;
1307 }
1308}
1309
1310static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1311{
1312 struct zfcp_adapter *adapter = act->adapter;
1313 struct zfcp_port *port = act->port;
1314 struct zfcp_unit *unit = act->unit;
1315
1316 switch (act->action) {
2950 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1317 case ZFCP_ERP_ACTION_REOPEN_UNIT:
2951 if ((result == ZFCP_ERP_SUCCEEDED) 1318 if ((result == ZFCP_ERP_SUCCEEDED) &&
2952 && (!atomic_test_mask(ZFCP_STATUS_UNIT_TEMPORARY, 1319 !unit->device && port->rport) {
2953 &unit->status))
2954 && !unit->device
2955 && port->rport) {
2956 atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED, 1320 atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED,
2957 &unit->status); 1321 &unit->status);
2958 if (atomic_test_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, 1322 if (!(atomic_read(&unit->status) &
2959 &unit->status) == 0) 1323 ZFCP_STATUS_UNIT_SCSI_WORK_PENDING))
2960 zfcp_erp_schedule_work(unit); 1324 zfcp_erp_schedule_work(unit);
2961 } 1325 }
2962 zfcp_unit_put(unit); 1326 zfcp_unit_put(unit);
2963 break; 1327 break;
1328
2964 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1329 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
2965 case ZFCP_ERP_ACTION_REOPEN_PORT: 1330 case ZFCP_ERP_ACTION_REOPEN_PORT:
2966 if (atomic_test_mask(ZFCP_STATUS_PORT_NO_WWPN, 1331 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN) {
2967 &port->status)) {
2968 zfcp_port_put(port); 1332 zfcp_port_put(port);
2969 break; 1333 return;
2970 }
2971
2972 if ((result == ZFCP_ERP_SUCCEEDED)
2973 && !port->rport) {
2974 struct fc_rport_identifiers ids;
2975 ids.node_name = port->wwnn;
2976 ids.port_name = port->wwpn;
2977 ids.port_id = port->d_id;
2978 ids.roles = FC_RPORT_ROLE_FCP_TARGET;
2979 port->rport =
2980 fc_remote_port_add(adapter->scsi_host, 0, &ids);
2981 if (!port->rport)
2982 ZFCP_LOG_NORMAL("failed registration of rport"
2983 "(adapter %s, wwpn=0x%016Lx)\n",
2984 zfcp_get_busid_by_port(port),
2985 port->wwpn);
2986 else {
2987 scsi_target_unblock(&port->rport->dev);
2988 port->rport->maxframe_size = port->maxframe_size;
2989 port->rport->supported_classes =
2990 port->supported_classes;
2991 }
2992 } 1334 }
1335 if ((result == ZFCP_ERP_SUCCEEDED) && !port->rport)
1336 zfcp_erp_rport_register(port);
2993 if ((result != ZFCP_ERP_SUCCEEDED) && port->rport) { 1337 if ((result != ZFCP_ERP_SUCCEEDED) && port->rport) {
2994 fc_remote_port_delete(port->rport); 1338 fc_remote_port_delete(port->rport);
2995 port->rport = NULL; 1339 port->rport = NULL;
2996 } 1340 }
2997 zfcp_port_put(port); 1341 zfcp_port_put(port);
2998 break; 1342 break;
1343
2999 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1344 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
3000 if (result != ZFCP_ERP_SUCCEEDED) { 1345 if (result != ZFCP_ERP_SUCCEEDED)
3001 list_for_each_entry(port, &adapter->port_list_head, list) 1346 zfcp_erp_rports_del(adapter);
3002 if (port->rport &&
3003 !atomic_test_mask(ZFCP_STATUS_PORT_WKA,
3004 &port->status)) {
3005 fc_remote_port_delete(port->rport);
3006 port->rport = NULL;
3007 }
3008 }
3009 zfcp_adapter_put(adapter); 1347 zfcp_adapter_put(adapter);
3010 break; 1348 break;
3011 default:
3012 break;
3013 } 1349 }
3014} 1350}
3015 1351
1352static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
1353{
1354 switch (erp_action->action) {
1355 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1356 return zfcp_erp_adapter_strategy(erp_action);
1357 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1358 return zfcp_erp_port_forced_strategy(erp_action);
1359 case ZFCP_ERP_ACTION_REOPEN_PORT:
1360 return zfcp_erp_port_strategy(erp_action);
1361 case ZFCP_ERP_ACTION_REOPEN_UNIT:
1362 return zfcp_erp_unit_strategy(erp_action);
1363 }
1364 return ZFCP_ERP_FAILED;
1365}
3016 1366
3017static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) 1367static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
3018{ 1368{
3019 struct zfcp_port *port; 1369 int retval;
1370 struct zfcp_adapter *adapter = erp_action->adapter;
1371 unsigned long flags;
3020 1372
3021 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) 1373 read_lock_irqsave(&zfcp_data.config_lock, flags);
3022 zfcp_erp_action_dismiss(&adapter->erp_action); 1374 write_lock(&adapter->erp_lock);
3023 else 1375
3024 list_for_each_entry(port, &adapter->port_list_head, list) 1376 zfcp_erp_strategy_check_fsfreq(erp_action);
3025 zfcp_erp_action_dismiss_port(port); 1377
1378 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
1379 zfcp_erp_action_dequeue(erp_action);
1380 retval = ZFCP_ERP_DISMISSED;
1381 goto unlock;
1382 }
1383
1384 zfcp_erp_action_to_running(erp_action);
1385
1386 /* no lock to allow for blocking operations */
1387 write_unlock(&adapter->erp_lock);
1388 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1389 retval = zfcp_erp_strategy_do_action(erp_action);
1390 read_lock_irqsave(&zfcp_data.config_lock, flags);
1391 write_lock(&adapter->erp_lock);
1392
1393 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
1394 retval = ZFCP_ERP_CONTINUES;
1395
1396 switch (retval) {
1397 case ZFCP_ERP_NOMEM:
1398 if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
1399 ++adapter->erp_low_mem_count;
1400 erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
1401 }
1402 if (adapter->erp_total_count == adapter->erp_low_mem_count)
1403 _zfcp_erp_adapter_reopen(adapter, 0, 66, NULL);
1404 else {
1405 zfcp_erp_strategy_memwait(erp_action);
1406 retval = ZFCP_ERP_CONTINUES;
1407 }
1408 goto unlock;
1409
1410 case ZFCP_ERP_CONTINUES:
1411 if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
1412 --adapter->erp_low_mem_count;
1413 erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
1414 }
1415 goto unlock;
1416 }
1417
1418 retval = zfcp_erp_strategy_check_target(erp_action, retval);
1419 zfcp_erp_action_dequeue(erp_action);
1420 retval = zfcp_erp_strategy_statechange(erp_action, retval);
1421 if (retval == ZFCP_ERP_EXIT)
1422 goto unlock;
1423 zfcp_erp_strategy_followup_actions(erp_action);
1424
1425 unlock:
1426 write_unlock(&adapter->erp_lock);
1427 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1428
1429 if (retval != ZFCP_ERP_CONTINUES)
1430 zfcp_erp_action_cleanup(erp_action, retval);
1431
1432 return retval;
3026} 1433}
3027 1434
3028static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) 1435static int zfcp_erp_thread(void *data)
3029{ 1436{
3030 struct zfcp_unit *unit; 1437 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
1438 struct list_head *next;
1439 struct zfcp_erp_action *act;
1440 unsigned long flags;
3031 1441
3032 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) 1442 daemonize("zfcperp%s", adapter->ccw_device->dev.bus_id);
3033 zfcp_erp_action_dismiss(&port->erp_action); 1443 /* Block all signals */
1444 siginitsetinv(&current->blocked, 0);
1445 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1446 wake_up(&adapter->erp_thread_wqh);
1447
1448 while (!(atomic_read(&adapter->status) &
1449 ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) {
1450 write_lock_irqsave(&adapter->erp_lock, flags);
1451 next = adapter->erp_ready_head.next;
1452 write_unlock_irqrestore(&adapter->erp_lock, flags);
1453
1454 if (next != &adapter->erp_ready_head) {
1455 act = list_entry(next, struct zfcp_erp_action, list);
1456
1457 /* there is more to come after dismission, no notify */
1458 if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED)
1459 zfcp_erp_wakeup(adapter);
1460 }
1461
1462 zfcp_rec_dbf_event_thread(4, adapter);
1463 down_interruptible(&adapter->erp_ready_sem);
1464 zfcp_rec_dbf_event_thread(5, adapter);
1465 }
1466
1467 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1468 wake_up(&adapter->erp_thread_wqh);
1469
1470 return 0;
1471}
1472
1473/**
1474 * zfcp_erp_thread_setup - Start ERP thread for adapter
1475 * @adapter: Adapter to start the ERP thread for
1476 *
1477 * Returns 0 on success or error code from kernel_thread()
1478 */
1479int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
1480{
1481 int retval;
1482
1483 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1484 retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
1485 if (retval < 0) {
1486 dev_err(&adapter->ccw_device->dev,
1487 "Creation of ERP thread failed.\n");
1488 return retval;
1489 }
1490 wait_event(adapter->erp_thread_wqh,
1491 atomic_read(&adapter->status) &
1492 ZFCP_STATUS_ADAPTER_ERP_THREAD_UP);
1493 return 0;
1494}
1495
1496/**
1497 * zfcp_erp_thread_kill - Stop ERP thread.
1498 * @adapter: Adapter where the ERP thread should be stopped.
1499 *
1500 * The caller of this routine ensures that the specified adapter has
1501 * been shut down and that this operation has been completed. Thus,
1502 * there are no pending erp_actions which would need to be handled
1503 * here.
1504 */
1505void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
1506{
1507 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
1508 up(&adapter->erp_ready_sem);
1509 zfcp_rec_dbf_event_thread_lock(2, adapter);
1510
1511 wait_event(adapter->erp_thread_wqh,
1512 !(atomic_read(&adapter->status) &
1513 ZFCP_STATUS_ADAPTER_ERP_THREAD_UP));
1514
1515 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
1516 &adapter->status);
1517}
1518
1519/**
1520 * zfcp_erp_adapter_failed - Set adapter status to failed.
1521 * @adapter: Failed adapter.
1522 * @id: Event id for debug trace.
1523 * @ref: Reference for debug trace.
1524 */
1525void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
1526{
1527 zfcp_erp_modify_adapter_status(adapter, id, ref,
1528 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1529 dev_err(&adapter->ccw_device->dev, "Adapter ERP failed.\n");
1530}
1531
1532/**
1533 * zfcp_erp_port_failed - Set port status to failed.
1534 * @port: Failed port.
1535 * @id: Event id for debug trace.
1536 * @ref: Reference for debug trace.
1537 */
1538void zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
1539{
1540 zfcp_erp_modify_port_status(port, id, ref,
1541 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1542
1543 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)
1544 dev_err(&port->adapter->ccw_device->dev,
1545 "Port ERP failed for WKA port d_id=0x%06x.\n",
1546 port->d_id);
3034 else 1547 else
3035 list_for_each_entry(unit, &port->unit_list_head, list) 1548 dev_err(&port->adapter->ccw_device->dev,
3036 zfcp_erp_action_dismiss_unit(unit); 1549 "Port ERP failed for port wwpn=0x%016Lx.\n",
1550 port->wwpn);
3037} 1551}
3038 1552
3039static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) 1553/**
1554 * zfcp_erp_unit_failed - Set unit status to failed.
1555 * @unit: Failed unit.
1556 * @id: Event id for debug trace.
1557 * @ref: Reference for debug trace.
1558 */
1559void zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref)
3040{ 1560{
3041 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) 1561 zfcp_erp_modify_unit_status(unit, id, ref,
3042 zfcp_erp_action_dismiss(&unit->erp_action); 1562 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1563
1564 dev_err(&unit->port->adapter->ccw_device->dev,
1565 "Unit ERP failed for unit 0x%016Lx on port 0x%016Lx.\n",
1566 unit->fcp_lun, unit->port->wwpn);
3043} 1567}
3044 1568
3045static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) 1569/**
1570 * zfcp_erp_wait - wait for completion of error recovery on an adapter
1571 * @adapter: adapter for which to wait for completion of its error recovery
1572 */
1573void zfcp_erp_wait(struct zfcp_adapter *adapter)
3046{ 1574{
3047 list_move(&erp_action->list, &erp_action->adapter->erp_running_head); 1575 wait_event(adapter->erp_done_wqh,
3048 zfcp_rec_dbf_event_action(145, erp_action); 1576 !(atomic_read(&adapter->status) &
1577 ZFCP_STATUS_ADAPTER_ERP_PENDING));
1578}
1579
1580/**
1581 * zfcp_erp_modify_adapter_status - change adapter status bits
1582 * @adapter: adapter to change the status
1583 * @id: id for the debug trace
1584 * @ref: reference for the debug trace
1585 * @mask: status bits to change
1586 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
1587 *
1588 * Changes in common status bits are propagated to attached ports and units.
1589 */
1590void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, u8 id,
1591 void *ref, u32 mask, int set_or_clear)
1592{
1593 struct zfcp_port *port;
1594 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1595
1596 if (set_or_clear == ZFCP_SET) {
1597 if (status_change_set(mask, &adapter->status))
1598 zfcp_rec_dbf_event_adapter(id, ref, adapter);
1599 atomic_set_mask(mask, &adapter->status);
1600 } else {
1601 if (status_change_clear(mask, &adapter->status))
1602 zfcp_rec_dbf_event_adapter(id, ref, adapter);
1603 atomic_clear_mask(mask, &adapter->status);
1604 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1605 atomic_set(&adapter->erp_counter, 0);
1606 }
1607
1608 if (common_mask)
1609 list_for_each_entry(port, &adapter->port_list_head, list)
1610 zfcp_erp_modify_port_status(port, id, ref, common_mask,
1611 set_or_clear);
3049} 1612}
3050 1613
3051static void zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action) 1614/**
1615 * zfcp_erp_modify_port_status - change port status bits
1616 * @port: port to change the status bits
1617 * @id: id for the debug trace
1618 * @ref: reference for the debug trace
1619 * @mask: status bits to change
1620 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
1621 *
1622 * Changes in common status bits are propagated to attached units.
1623 */
1624void zfcp_erp_modify_port_status(struct zfcp_port *port, u8 id, void *ref,
1625 u32 mask, int set_or_clear)
3052{ 1626{
3053 list_move(&erp_action->list, &erp_action->adapter->erp_ready_head); 1627 struct zfcp_unit *unit;
3054 zfcp_rec_dbf_event_action(146, erp_action); 1628 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1629
1630 if (set_or_clear == ZFCP_SET) {
1631 if (status_change_set(mask, &port->status))
1632 zfcp_rec_dbf_event_port(id, ref, port);
1633 atomic_set_mask(mask, &port->status);
1634 } else {
1635 if (status_change_clear(mask, &port->status))
1636 zfcp_rec_dbf_event_port(id, ref, port);
1637 atomic_clear_mask(mask, &port->status);
1638 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1639 atomic_set(&port->erp_counter, 0);
1640 }
1641
1642 if (common_mask)
1643 list_for_each_entry(unit, &port->unit_list_head, list)
1644 zfcp_erp_modify_unit_status(unit, id, ref, common_mask,
1645 set_or_clear);
3055} 1646}
3056 1647
1648/**
1649 * zfcp_erp_modify_unit_status - change unit status bits
1650 * @unit: unit to change the status bits
1651 * @id: id for the debug trace
1652 * @ref: reference for the debug trace
1653 * @mask: status bits to change
1654 * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
1655 */
1656void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u8 id, void *ref,
1657 u32 mask, int set_or_clear)
1658{
1659 if (set_or_clear == ZFCP_SET) {
1660 if (status_change_set(mask, &unit->status))
1661 zfcp_rec_dbf_event_unit(id, ref, unit);
1662 atomic_set_mask(mask, &unit->status);
1663 } else {
1664 if (status_change_clear(mask, &unit->status))
1665 zfcp_rec_dbf_event_unit(id, ref, unit);
1666 atomic_clear_mask(mask, &unit->status);
1667 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
1668 atomic_set(&unit->erp_counter, 0);
1669 }
1670 }
1671}
1672
1673/**
1674 * zfcp_erp_port_boxed - Mark port as "boxed" and start ERP
1675 * @port: The "boxed" port.
1676 * @id: The debug trace id.
1677 * @id: Reference for the debug trace.
1678 */
3057void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref) 1679void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref)
3058{ 1680{
3059 unsigned long flags; 1681 unsigned long flags;
@@ -3065,6 +1687,12 @@ void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref)
3065 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); 1687 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
3066} 1688}
3067 1689
1690/**
1691 * zfcp_erp_unit_boxed - Mark unit as "boxed" and start ERP
1692 * @port: The "boxed" unit.
1693 * @id: The debug trace id.
1694 * @id: Reference for the debug trace.
1695 */
3068void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref) 1696void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref)
3069{ 1697{
3070 zfcp_erp_modify_unit_status(unit, id, ref, 1698 zfcp_erp_modify_unit_status(unit, id, ref,
@@ -3072,6 +1700,15 @@ void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref)
3072 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); 1700 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
3073} 1701}
3074 1702
1703/**
1704 * zfcp_erp_port_access_denied - Adapter denied access to port.
1705 * @port: port where access has been denied
1706 * @id: id for debug trace
1707 * @ref: reference for debug trace
1708 *
1709 * Since the adapter has denied access, stop using the port and the
1710 * attached units.
1711 */
3075void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref) 1712void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref)
3076{ 1713{
3077 unsigned long flags; 1714 unsigned long flags;
@@ -3083,6 +1720,14 @@ void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref)
3083 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 1720 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
3084} 1721}
3085 1722
1723/**
1724 * zfcp_erp_unit_access_denied - Adapter denied access to unit.
1725 * @unit: unit where access has been denied
1726 * @id: id for debug trace
1727 * @ref: reference for debug trace
1728 *
1729 * Since the adapter has denied access, stop using the unit.
1730 */
3086void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, u8 id, void *ref) 1731void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, u8 id, void *ref)
3087{ 1732{
3088 zfcp_erp_modify_unit_status(unit, id, ref, 1733 zfcp_erp_modify_unit_status(unit, id, ref,
@@ -3090,67 +1735,54 @@ void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, u8 id, void *ref)
3090 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); 1735 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
3091} 1736}
3092 1737
3093void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id, 1738static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, u8 id,
3094 void *ref) 1739 void *ref)
3095{ 1740{
3096 struct zfcp_port *port; 1741 int status = atomic_read(&unit->status);
3097 unsigned long flags; 1742 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
3098 1743 ZFCP_STATUS_COMMON_ACCESS_BOXED)))
3099 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
3100 return; 1744 return;
3101 1745
3102 read_lock_irqsave(&zfcp_data.config_lock, flags); 1746 zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
3103 if (adapter->nameserver_port)
3104 zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref);
3105 list_for_each_entry(port, &adapter->port_list_head, list)
3106 if (port != adapter->nameserver_port)
3107 zfcp_erp_port_access_changed(port, id, ref);
3108 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
3109} 1747}
3110 1748
3111void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id, void *ref) 1749static void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id,
1750 void *ref)
3112{ 1751{
3113 struct zfcp_adapter *adapter = port->adapter;
3114 struct zfcp_unit *unit; 1752 struct zfcp_unit *unit;
1753 int status = atomic_read(&port->status);
3115 1754
3116 if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, 1755 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
3117 &port->status) && 1756 ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
3118 !atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED, 1757 if (!(status & ZFCP_STATUS_PORT_WKA))
3119 &port->status)) {
3120 if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
3121 list_for_each_entry(unit, &port->unit_list_head, list) 1758 list_for_each_entry(unit, &port->unit_list_head, list)
3122 zfcp_erp_unit_access_changed(unit, id, ref); 1759 zfcp_erp_unit_access_changed(unit, id, ref);
3123 return; 1760 return;
3124 } 1761 }
3125 1762
3126 ZFCP_LOG_NORMAL("reopen of port 0x%016Lx on adapter %s " 1763 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
3127 "(due to ACT update)\n",
3128 port->wwpn, zfcp_get_busid_by_adapter(adapter));
3129 if (zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref))
3130 ZFCP_LOG_NORMAL("failed reopen of port"
3131 "(adapter %s, wwpn=0x%016Lx)\n",
3132 zfcp_get_busid_by_adapter(adapter), port->wwpn);
3133} 1764}
3134 1765
3135void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, u8 id, void *ref) 1766/**
1767 * zfcp_erp_adapter_access_changed - Process change in adapter ACT
1768 * @adapter: Adapter where the Access Control Table (ACT) changed
1769 * @id: Id for debug trace
1770 * @ref: Reference for debug trace
1771 */
1772void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id,
1773 void *ref)
3136{ 1774{
3137 struct zfcp_adapter *adapter = unit->port->adapter; 1775 struct zfcp_port *port;
1776 unsigned long flags;
3138 1777
3139 if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, 1778 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
3140 &unit->status) &&
3141 !atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
3142 &unit->status))
3143 return; 1779 return;
3144 1780
3145 ZFCP_LOG_NORMAL("reopen of unit 0x%016Lx on port 0x%016Lx " 1781 read_lock_irqsave(&zfcp_data.config_lock, flags);
3146 " on adapter %s (due to ACT update)\n", 1782 if (adapter->nameserver_port)
3147 unit->fcp_lun, unit->port->wwpn, 1783 zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref);
3148 zfcp_get_busid_by_adapter(adapter)); 1784 list_for_each_entry(port, &adapter->port_list_head, list)
3149 if (zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref)) 1785 if (port != adapter->nameserver_port)
3150 ZFCP_LOG_NORMAL("failed reopen of unit (adapter %s, " 1786 zfcp_erp_port_access_changed(port, id, ref);
3151 "wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n", 1787 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
3152 zfcp_get_busid_by_adapter(adapter),
3153 unit->port->wwpn, unit->fcp_lun);
3154} 1788}
3155
3156#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 6abf178fda5d..edfdb21591f3 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -1,22 +1,9 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * External function declarations.
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22#ifndef ZFCP_EXT_H 9#ifndef ZFCP_EXT_H
@@ -24,172 +11,50 @@
24 11
25#include "zfcp_def.h" 12#include "zfcp_def.h"
26 13
27extern struct zfcp_data zfcp_data; 14/* zfcp_aux.c */
28 15extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *,
29/******************************** SYSFS *************************************/ 16 fcp_lun_t);
30extern struct attribute_group *zfcp_driver_attr_groups[]; 17extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *,
31extern int zfcp_sysfs_adapter_create_files(struct device *); 18 wwn_t);
32extern void zfcp_sysfs_adapter_remove_files(struct device *); 19extern int zfcp_adapter_enqueue(struct ccw_device *);
33extern int zfcp_sysfs_port_create_files(struct device *, u32); 20extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
34extern void zfcp_sysfs_port_remove_files(struct device *, u32); 21extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t, u32,
35extern int zfcp_sysfs_unit_create_files(struct device *); 22 u32);
36extern void zfcp_sysfs_unit_remove_files(struct device *); 23extern void zfcp_port_dequeue(struct zfcp_port *);
37extern void zfcp_sysfs_port_release(struct device *);
38extern void zfcp_sysfs_unit_release(struct device *);
39
40/**************************** CONFIGURATION *********************************/
41extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, fcp_lun_t);
42extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, wwn_t);
43extern struct zfcp_port *zfcp_get_port_by_did(struct zfcp_adapter *, u32);
44struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
45extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
46extern int zfcp_adapter_debug_register(struct zfcp_adapter *);
47extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
48extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *);
49extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t,
50 u32, u32);
51extern void zfcp_port_dequeue(struct zfcp_port *);
52extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t); 24extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t);
53extern void zfcp_unit_dequeue(struct zfcp_unit *); 25extern void zfcp_unit_dequeue(struct zfcp_unit *);
54 26extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
55/******************************* S/390 IO ************************************/ 27extern void zfcp_sg_free_table(struct scatterlist *, int);
56extern int zfcp_ccw_register(void); 28extern int zfcp_sg_setup_table(struct scatterlist *, int);
57 29
58extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int); 30/* zfcp_ccw.c */
59extern int zfcp_qdio_allocate(struct zfcp_adapter *); 31extern int zfcp_ccw_register(void);
60extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *); 32
61extern void zfcp_qdio_free_queues(struct zfcp_adapter *); 33/* zfcp_cfdc.c */
62extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *, 34extern struct miscdevice zfcp_cfdc_misc;
63 struct zfcp_fsf_req *); 35
64 36/* zfcp_dbf.c */
65extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req 37extern int zfcp_adapter_debug_register(struct zfcp_adapter *);
66 (struct zfcp_fsf_req *, int, int); 38extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *);
67extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr 39extern void zfcp_rec_dbf_event_thread(u8, struct zfcp_adapter *);
68 (struct zfcp_fsf_req *); 40extern void zfcp_rec_dbf_event_thread_lock(u8, struct zfcp_adapter *);
69extern int zfcp_qdio_sbals_from_sg 41extern void zfcp_rec_dbf_event_adapter(u8, void *, struct zfcp_adapter *);
70 (struct zfcp_fsf_req *, unsigned long, struct scatterlist *, int, int); 42extern void zfcp_rec_dbf_event_port(u8, void *, struct zfcp_port *);
71extern int zfcp_qdio_sbals_from_scsicmnd 43extern void zfcp_rec_dbf_event_unit(u8, void *, struct zfcp_unit *);
72 (struct zfcp_fsf_req *, unsigned long, struct scsi_cmnd *); 44extern void zfcp_rec_dbf_event_trigger(u8, void *, u8, u8, void *,
73 45 struct zfcp_adapter *,
74
75/******************************** FSF ****************************************/
76extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
77extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
78extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
79
80extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
81extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
82
83extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
84extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *,
85 struct fsf_qtcb_bottom_config *);
86extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
87extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *,
88 struct fsf_qtcb_bottom_port *);
89extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
90 u32, u32, struct zfcp_sg_list *);
91extern void zfcp_fsf_start_timer(struct zfcp_fsf_req *, unsigned long);
92extern void zfcp_erp_start_timer(struct zfcp_fsf_req *);
93extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
94extern int zfcp_fsf_status_read(struct zfcp_adapter *, int);
95extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
96 unsigned long *, struct zfcp_fsf_req **);
97extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
98 struct zfcp_erp_action *);
99extern int zfcp_fsf_send_els(struct zfcp_send_els *);
100extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
101 struct zfcp_unit *,
102 struct scsi_cmnd *, int, int);
103extern int zfcp_fsf_req_complete(struct zfcp_fsf_req *);
104extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *);
105extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
106extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_command_task_management(
107 struct zfcp_adapter *, struct zfcp_unit *, u8, int);
108extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(
109 unsigned long, struct zfcp_adapter *, struct zfcp_unit *, int);
110
111/******************************* FC/FCP **************************************/
112extern int zfcp_nameserver_enqueue(struct zfcp_adapter *);
113extern int zfcp_ns_gid_pn_request(struct zfcp_erp_action *);
114extern int zfcp_check_ct_response(struct ct_hdr *);
115extern int zfcp_handle_els_rjt(u32, struct zfcp_ls_rjt_par *);
116extern void zfcp_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
117
118/******************************* SCSI ****************************************/
119extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
120extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
121extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
122extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *);
123extern void set_host_byte(int *, char);
124extern void set_driver_byte(int *, char);
125extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
126extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *);
127
128extern int zfcp_scsi_command_async(struct zfcp_adapter *,struct zfcp_unit *,
129 struct scsi_cmnd *, int);
130extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *, int);
131extern struct fc_function_template zfcp_transport_functions;
132
133/******************************** ERP ****************************************/
134extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u8, void *,
135 u32, int);
136extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, u8, void *);
137extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, u8, void *);
138extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, u8, void *);
139
140extern void zfcp_erp_modify_port_status(struct zfcp_port *, u8, void *, u32,
141 int);
142extern int zfcp_erp_port_reopen(struct zfcp_port *, int, u8, void *);
143extern int zfcp_erp_port_shutdown(struct zfcp_port *, int, u8, void *);
144extern int zfcp_erp_port_forced_reopen(struct zfcp_port *, int, u8, void *);
145extern void zfcp_erp_port_failed(struct zfcp_port *, u8, void *);
146extern int zfcp_erp_port_reopen_all(struct zfcp_adapter *, int, u8, void *);
147
148extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u8, void *, u32,
149 int);
150extern int zfcp_erp_unit_reopen(struct zfcp_unit *, int, u8, void *);
151extern int zfcp_erp_unit_shutdown(struct zfcp_unit *, int, u8, void *);
152extern void zfcp_erp_unit_failed(struct zfcp_unit *, u8, void *);
153
154extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
155extern int zfcp_erp_thread_kill(struct zfcp_adapter *);
156extern int zfcp_erp_wait(struct zfcp_adapter *);
157extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
158
159extern int zfcp_test_link(struct zfcp_port *);
160
161extern void zfcp_erp_port_boxed(struct zfcp_port *, u8 id, void *ref);
162extern void zfcp_erp_unit_boxed(struct zfcp_unit *, u8 id, void *ref);
163extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8 id, void *ref);
164extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8 id, void *ref);
165extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
166extern void zfcp_erp_port_access_changed(struct zfcp_port *, u8, void *);
167extern void zfcp_erp_unit_access_changed(struct zfcp_unit *, u8, void *);
168
169/******************************** AUX ****************************************/
170extern void zfcp_rec_dbf_event_thread(u8 id, struct zfcp_adapter *adapter,
171 int lock);
172extern void zfcp_rec_dbf_event_adapter(u8 id, void *ref, struct zfcp_adapter *);
173extern void zfcp_rec_dbf_event_port(u8 id, void *ref, struct zfcp_port *port);
174extern void zfcp_rec_dbf_event_unit(u8 id, void *ref, struct zfcp_unit *unit);
175extern void zfcp_rec_dbf_event_trigger(u8 id, void *ref, u8 want, u8 need,
176 void *action, struct zfcp_adapter *,
177 struct zfcp_port *, struct zfcp_unit *); 46 struct zfcp_port *, struct zfcp_unit *);
178extern void zfcp_rec_dbf_event_action(u8 id, struct zfcp_erp_action *); 47extern void zfcp_rec_dbf_event_action(u8, struct zfcp_erp_action *);
179
180extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *); 48extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
181extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *, 49extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
182 struct fsf_status_read_buffer *); 50 struct fsf_status_read_buffer *);
183extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, 51extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
184 unsigned int, unsigned int, unsigned int, 52 int);
185 int, int);
186
187extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *); 53extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
188extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); 54extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
189extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); 55extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
190extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *); 56extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
191extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); 57extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
192
193extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, 58extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
194 struct scsi_cmnd *, 59 struct scsi_cmnd *,
195 struct zfcp_fsf_req *); 60 struct zfcp_fsf_req *);
@@ -198,6 +63,101 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
198 unsigned long); 63 unsigned long);
199extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 64extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
200 struct scsi_cmnd *); 65 struct scsi_cmnd *);
201extern int zfcp_reqlist_isempty(struct zfcp_adapter *); 66
67/* zfcp_erp.c */
68extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u8, void *,
69 u32, int);
70extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, u8, void *);
71extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, u8, void *);
72extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, u8, void *);
73extern void zfcp_erp_modify_port_status(struct zfcp_port *, u8, void *, u32,
74 int);
75extern int zfcp_erp_port_reopen(struct zfcp_port *, int, u8, void *);
76extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, u8, void *);
77extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, u8, void *);
78extern void zfcp_erp_port_failed(struct zfcp_port *, u8, void *);
79extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u8, void *, u32,
80 int);
81extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, u8, void *);
82extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, u8, void *);
83extern void zfcp_erp_unit_failed(struct zfcp_unit *, u8, void *);
84extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
85extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
86extern void zfcp_erp_wait(struct zfcp_adapter *);
87extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
88extern void zfcp_erp_port_boxed(struct zfcp_port *, u8, void *);
89extern void zfcp_erp_unit_boxed(struct zfcp_unit *, u8, void *);
90extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8, void *);
91extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *);
92extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
93extern void zfcp_erp_timeout_handler(unsigned long);
94
95/* zfcp_fc.c */
96extern int zfcp_scan_ports(struct zfcp_adapter *);
97extern void _zfcp_scan_ports_later(struct work_struct *);
98extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
99extern int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *);
100extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
101extern void zfcp_test_link(struct zfcp_port *);
102
103/* zfcp_fsf.c */
104extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
105extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
106extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
107extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
108extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
109extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
110extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *,
111 struct fsf_qtcb_bottom_config *);
112extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
113extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *,
114 struct fsf_qtcb_bottom_port *);
115extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
116 struct zfcp_fsf_cfdc *);
117extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
118extern int zfcp_fsf_status_read(struct zfcp_adapter *);
119extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
120extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
121 struct zfcp_erp_action *);
122extern int zfcp_fsf_send_els(struct zfcp_send_els *);
123extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
124 struct zfcp_unit *,
125 struct scsi_cmnd *, int, int);
126extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *);
127extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
128extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *,
129 struct zfcp_unit *, u8, int);
130extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
131 struct zfcp_adapter *,
132 struct zfcp_unit *, int);
133
134/* zfcp_qdio.c */
135extern int zfcp_qdio_allocate(struct zfcp_adapter *);
136extern void zfcp_qdio_free(struct zfcp_adapter *);
137extern int zfcp_qdio_send(struct zfcp_fsf_req *);
138extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req(
139 struct zfcp_fsf_req *);
140extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr(
141 struct zfcp_fsf_req *);
142extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long,
143 struct scatterlist *, int);
144extern int zfcp_qdio_open(struct zfcp_adapter *);
145extern void zfcp_qdio_close(struct zfcp_adapter *);
146
147/* zfcp_scsi.c */
148extern struct zfcp_data zfcp_data;
149extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
150extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
151extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
152extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
153extern struct fc_function_template zfcp_transport_functions;
154
155/* zfcp_sysfs.c */
156extern struct attribute_group zfcp_sysfs_unit_attrs;
157extern struct attribute_group zfcp_sysfs_adapter_attrs;
158extern struct attribute_group zfcp_sysfs_ns_port_attrs;
159extern struct attribute_group zfcp_sysfs_port_attrs;
160extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
161extern struct device_attribute *zfcp_sysfs_shost_attrs[];
202 162
203#endif /* ZFCP_EXT_H */ 163#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
new file mode 100644
index 000000000000..e984469bb98b
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -0,0 +1,567 @@
1/*
2 * zfcp device driver
3 *
4 * Fibre Channel related functions for the zfcp device driver.
5 *
6 * Copyright IBM Corporation 2008
7 */
8
9#include "zfcp_ext.h"
10
11struct ct_iu_gpn_ft_req {
12 struct ct_hdr header;
13 u8 flags;
14 u8 domain_id_scope;
15 u8 area_id_scope;
16 u8 fc4_type;
17} __attribute__ ((packed));
18
19struct gpn_ft_resp_acc {
20 u8 control;
21 u8 port_id[3];
22 u8 reserved[4];
23 u64 wwpn;
24} __attribute__ ((packed));
25
26#define ZFCP_GPN_FT_ENTRIES ((PAGE_SIZE - sizeof(struct ct_hdr)) \
27 / sizeof(struct gpn_ft_resp_acc))
28#define ZFCP_GPN_FT_BUFFERS 4
29#define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1)
30
31struct ct_iu_gpn_ft_resp {
32 struct ct_hdr header;
33 struct gpn_ft_resp_acc accept[ZFCP_GPN_FT_ENTRIES];
34} __attribute__ ((packed));
35
36struct zfcp_gpn_ft {
37 struct zfcp_send_ct ct;
38 struct scatterlist sg_req;
39 struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
40};
41
42static struct zfcp_port *zfcp_get_port_by_did(struct zfcp_adapter *adapter,
43 u32 d_id)
44{
45 struct zfcp_port *port;
46
47 list_for_each_entry(port, &adapter->port_list_head, list)
48 if ((port->d_id == d_id) &&
49 !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status))
50 return port;
51 return NULL;
52}
53
54static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
55 struct fcp_rscn_element *elem)
56{
57 unsigned long flags;
58 struct zfcp_port *port;
59
60 read_lock_irqsave(&zfcp_data.config_lock, flags);
61 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
62 if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
63 continue;
64 /* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */
65 if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status))
66 /* Try to connect to unused ports anyway. */
67 zfcp_erp_port_reopen(port,
68 ZFCP_STATUS_COMMON_ERP_FAILED,
69 82, fsf_req);
70 else if ((port->d_id & range) == (elem->nport_did & range))
71 /* Check connection status for connected ports */
72 zfcp_test_link(port);
73 }
74 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
75}
76
77static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
78{
79 struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
80 struct fcp_rscn_head *fcp_rscn_head;
81 struct fcp_rscn_element *fcp_rscn_element;
82 u16 i;
83 u16 no_entries;
84 u32 range_mask;
85
86 fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload.data;
87 fcp_rscn_element = (struct fcp_rscn_element *) fcp_rscn_head;
88
89 /* see FC-FS */
90 no_entries = fcp_rscn_head->payload_len /
91 sizeof(struct fcp_rscn_element);
92
93 for (i = 1; i < no_entries; i++) {
94 /* skip head and start with 1st element */
95 fcp_rscn_element++;
96 switch (fcp_rscn_element->addr_format) {
97 case ZFCP_PORT_ADDRESS:
98 range_mask = ZFCP_PORTS_RANGE_PORT;
99 break;
100 case ZFCP_AREA_ADDRESS:
101 range_mask = ZFCP_PORTS_RANGE_AREA;
102 break;
103 case ZFCP_DOMAIN_ADDRESS:
104 range_mask = ZFCP_PORTS_RANGE_DOMAIN;
105 break;
106 case ZFCP_FABRIC_ADDRESS:
107 range_mask = ZFCP_PORTS_RANGE_FABRIC;
108 break;
109 default:
110 continue;
111 }
112 _zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element);
113 }
114 schedule_work(&fsf_req->adapter->scan_work);
115}
116
117static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, wwn_t wwpn)
118{
119 struct zfcp_adapter *adapter = req->adapter;
120 struct zfcp_port *port;
121 unsigned long flags;
122
123 read_lock_irqsave(&zfcp_data.config_lock, flags);
124 list_for_each_entry(port, &adapter->port_list_head, list)
125 if (port->wwpn == wwpn)
126 break;
127 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
128
129 if (port && (port->wwpn == wwpn))
130 zfcp_erp_port_forced_reopen(port, 0, 83, req);
131}
132
133static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
134{
135 struct fsf_status_read_buffer *status_buffer =
136 (struct fsf_status_read_buffer *)req->data;
137 struct fsf_plogi *els_plogi =
138 (struct fsf_plogi *) status_buffer->payload.data;
139
140 zfcp_fc_incoming_wwpn(req, els_plogi->serv_param.wwpn);
141}
142
143static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
144{
145 struct fsf_status_read_buffer *status_buffer =
146 (struct fsf_status_read_buffer *)req->data;
147 struct fcp_logo *els_logo =
148 (struct fcp_logo *) status_buffer->payload.data;
149
150 zfcp_fc_incoming_wwpn(req, els_logo->nport_wwpn);
151}
152
153/**
154 * zfcp_fc_incoming_els - handle incoming ELS
155 * @fsf_req - request which contains incoming ELS
156 */
157void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
158{
159 struct fsf_status_read_buffer *status_buffer =
160 (struct fsf_status_read_buffer *) fsf_req->data;
161 unsigned int els_type = status_buffer->payload.data[0];
162
163 zfcp_san_dbf_event_incoming_els(fsf_req);
164 if (els_type == LS_PLOGI)
165 zfcp_fc_incoming_plogi(fsf_req);
166 else if (els_type == LS_LOGO)
167 zfcp_fc_incoming_logo(fsf_req);
168 else if (els_type == LS_RSCN)
169 zfcp_fc_incoming_rscn(fsf_req);
170}
171
172static void zfcp_ns_gid_pn_handler(unsigned long data)
173{
174 struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
175 struct zfcp_send_ct *ct = &gid_pn->ct;
176 struct ct_iu_gid_pn_req *ct_iu_req = sg_virt(ct->req);
177 struct ct_iu_gid_pn_resp *ct_iu_resp = sg_virt(ct->resp);
178 struct zfcp_port *port = gid_pn->port;
179
180 if (ct->status)
181 goto out;
182 if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) {
183 atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
184 goto out;
185 }
186 /* paranoia */
187 if (ct_iu_req->wwpn != port->wwpn)
188 goto out;
189 /* looks like a valid d_id */
190 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
191 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
192out:
193 mempool_free(gid_pn, port->adapter->pool.data_gid_pn);
194}
195
196/**
197 * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
198 * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
199 * return: -ENOMEM on error, 0 otherwise
200 */
201int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
202{
203 int ret;
204 struct zfcp_gid_pn_data *gid_pn;
205 struct zfcp_adapter *adapter = erp_action->adapter;
206
207 gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC);
208 if (!gid_pn)
209 return -ENOMEM;
210
211 memset(gid_pn, 0, sizeof(*gid_pn));
212
213 /* setup parameters for send generic command */
214 gid_pn->port = erp_action->port;
215 gid_pn->ct.port = adapter->nameserver_port;
216 gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
217 gid_pn->ct.handler_data = (unsigned long) gid_pn;
218 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
219 gid_pn->ct.req = &gid_pn->req;
220 gid_pn->ct.resp = &gid_pn->resp;
221 gid_pn->ct.req_count = 1;
222 gid_pn->ct.resp_count = 1;
223 sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req,
224 sizeof(struct ct_iu_gid_pn_req));
225 sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp,
226 sizeof(struct ct_iu_gid_pn_resp));
227
228 /* setup nameserver request */
229 gid_pn->ct_iu_req.header.revision = ZFCP_CT_REVISION;
230 gid_pn->ct_iu_req.header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
231 gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER;
232 gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS;
233 gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN;
234 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE;
235 gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
236
237 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
238 erp_action);
239 if (ret)
240 mempool_free(gid_pn, adapter->pool.data_gid_pn);
241 return ret;
242}
243
244/**
245 * zfcp_fc_plogi_evaluate - evaluate PLOGI playload
246 * @port: zfcp_port structure
247 * @plogi: plogi payload
248 *
249 * Evaluate PLOGI playload and copy important fields into zfcp_port structure
250 */
251void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi)
252{
253 port->maxframe_size = plogi->serv_param.common_serv_param[7] |
254 ((plogi->serv_param.common_serv_param[6] & 0x0F) << 8);
255 if (plogi->serv_param.class1_serv_param[0] & 0x80)
256 port->supported_classes |= FC_COS_CLASS1;
257 if (plogi->serv_param.class2_serv_param[0] & 0x80)
258 port->supported_classes |= FC_COS_CLASS2;
259 if (plogi->serv_param.class3_serv_param[0] & 0x80)
260 port->supported_classes |= FC_COS_CLASS3;
261 if (plogi->serv_param.class4_serv_param[0] & 0x80)
262 port->supported_classes |= FC_COS_CLASS4;
263}
264
265struct zfcp_els_adisc {
266 struct zfcp_send_els els;
267 struct scatterlist req;
268 struct scatterlist resp;
269 struct zfcp_ls_adisc ls_adisc;
270 struct zfcp_ls_adisc_acc ls_adisc_acc;
271};
272
273static void zfcp_fc_adisc_handler(unsigned long data)
274{
275 struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data;
276 struct zfcp_port *port = adisc->els.port;
277 struct zfcp_ls_adisc_acc *ls_adisc = &adisc->ls_adisc_acc;
278
279 if (adisc->els.status) {
280 /* request rejected or timed out */
281 zfcp_erp_port_forced_reopen(port, 0, 63, NULL);
282 goto out;
283 }
284
285 if (!port->wwnn)
286 port->wwnn = ls_adisc->wwnn;
287
288 if (port->wwpn != ls_adisc->wwpn)
289 zfcp_erp_port_reopen(port, 0, 64, NULL);
290
291 out:
292 zfcp_port_put(port);
293 kfree(adisc);
294}
295
296static int zfcp_fc_adisc(struct zfcp_port *port)
297{
298 struct zfcp_els_adisc *adisc;
299 struct zfcp_adapter *adapter = port->adapter;
300
301 adisc = kzalloc(sizeof(struct zfcp_els_adisc), GFP_ATOMIC);
302 if (!adisc)
303 return -ENOMEM;
304
305 adisc->els.req = &adisc->req;
306 adisc->els.resp = &adisc->resp;
307 sg_init_one(adisc->els.req, &adisc->ls_adisc,
308 sizeof(struct zfcp_ls_adisc));
309 sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
310 sizeof(struct zfcp_ls_adisc_acc));
311
312 adisc->els.req_count = 1;
313 adisc->els.resp_count = 1;
314 adisc->els.adapter = adapter;
315 adisc->els.port = port;
316 adisc->els.d_id = port->d_id;
317 adisc->els.handler = zfcp_fc_adisc_handler;
318 adisc->els.handler_data = (unsigned long) adisc;
319 adisc->els.ls_code = adisc->ls_adisc.code = ZFCP_LS_ADISC;
320
321 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
322 without FC-AL-2 capability, so we don't set it */
323 adisc->ls_adisc.wwpn = fc_host_port_name(adapter->scsi_host);
324 adisc->ls_adisc.wwnn = fc_host_node_name(adapter->scsi_host);
325 adisc->ls_adisc.nport_id = fc_host_port_id(adapter->scsi_host);
326
327 return zfcp_fsf_send_els(&adisc->els);
328}
329
330/**
331 * zfcp_test_link - lightweight link test procedure
332 * @port: port to be tested
333 *
334 * Test status of a link to a remote port using the ELS command ADISC.
335 * If there is a problem with the remote port, error recovery steps
336 * will be triggered.
337 */
338void zfcp_test_link(struct zfcp_port *port)
339{
340 int retval;
341
342 zfcp_port_get(port);
343 retval = zfcp_fc_adisc(port);
344 if (retval == 0 || retval == -EBUSY)
345 return;
346
347 /* send of ADISC was not possible */
348 zfcp_port_put(port);
349 zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
350}
351
352static int zfcp_scan_get_nameserver(struct zfcp_adapter *adapter)
353{
354 int ret;
355
356 if (!adapter->nameserver_port)
357 return -EINTR;
358
359 if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
360 &adapter->nameserver_port->status)) {
361 ret = zfcp_erp_port_reopen(adapter->nameserver_port, 0, 148,
362 NULL);
363 if (ret)
364 return ret;
365 zfcp_erp_wait(adapter);
366 zfcp_port_put(adapter->nameserver_port);
367 }
368 return !atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
369 &adapter->nameserver_port->status);
370}
371
372static void zfcp_gpn_ft_handler(unsigned long _done)
373{
374 complete((struct completion *)_done);
375}
376
377static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft)
378{
379 struct scatterlist *sg = &gpn_ft->sg_req;
380
381 kfree(sg_virt(sg)); /* free request buffer */
382 zfcp_sg_free_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS);
383
384 kfree(gpn_ft);
385}
386
387static struct zfcp_gpn_ft *zfcp_alloc_sg_env(void)
388{
389 struct zfcp_gpn_ft *gpn_ft;
390 struct ct_iu_gpn_ft_req *req;
391
392 gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
393 if (!gpn_ft)
394 return NULL;
395
396 req = kzalloc(sizeof(struct ct_iu_gpn_ft_req), GFP_KERNEL);
397 if (!req) {
398 kfree(gpn_ft);
399 gpn_ft = NULL;
400 goto out;
401 }
402 sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
403
404 if (zfcp_sg_setup_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS)) {
405 zfcp_free_sg_env(gpn_ft);
406 gpn_ft = NULL;
407 }
408out:
409 return gpn_ft;
410}
411
412
413static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
414 struct zfcp_adapter *adapter)
415{
416 struct zfcp_send_ct *ct = &gpn_ft->ct;
417 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
418 struct completion done;
419 int ret;
420
421 /* prepare CT IU for GPN_FT */
422 req->header.revision = ZFCP_CT_REVISION;
423 req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
424 req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
425 req->header.options = ZFCP_CT_SYNCHRONOUS;
426 req->header.cmd_rsp_code = ZFCP_CT_GPN_FT;
427 req->header.max_res_size = (sizeof(struct gpn_ft_resp_acc) *
428 (ZFCP_GPN_FT_MAX_ENTRIES - 1)) >> 2;
429 req->flags = 0;
430 req->domain_id_scope = 0;
431 req->area_id_scope = 0;
432 req->fc4_type = ZFCP_CT_SCSI_FCP;
433
434 /* prepare zfcp_send_ct */
435 ct->port = adapter->nameserver_port;
436 ct->handler = zfcp_gpn_ft_handler;
437 ct->handler_data = (unsigned long)&done;
438 ct->timeout = 10;
439 ct->req = &gpn_ft->sg_req;
440 ct->resp = gpn_ft->sg_resp;
441 ct->req_count = 1;
442 ct->resp_count = ZFCP_GPN_FT_BUFFERS;
443
444 init_completion(&done);
445 ret = zfcp_fsf_send_ct(ct, NULL, NULL);
446 if (!ret)
447 wait_for_completion(&done);
448 return ret;
449}
450
451static void zfcp_validate_port(struct zfcp_port *port)
452{
453 struct zfcp_adapter *adapter = port->adapter;
454
455 atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
456
457 if (port == adapter->nameserver_port)
458 return;
459 if ((port->supported_classes != 0) || (port->units != 0)) {
460 zfcp_port_put(port);
461 return;
462 }
463 zfcp_erp_port_shutdown(port, 0, 151, NULL);
464 zfcp_erp_wait(adapter);
465 zfcp_port_put(port);
466 zfcp_port_dequeue(port);
467}
468
469static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
470{
471 struct zfcp_send_ct *ct = &gpn_ft->ct;
472 struct scatterlist *sg = gpn_ft->sg_resp;
473 struct ct_hdr *hdr = sg_virt(sg);
474 struct gpn_ft_resp_acc *acc = sg_virt(sg);
475 struct zfcp_adapter *adapter = ct->port->adapter;
476 struct zfcp_port *port, *tmp;
477 u32 d_id;
478 int ret = 0, x;
479
480 if (ct->status)
481 return -EIO;
482
483 if (hdr->cmd_rsp_code != ZFCP_CT_ACCEPT) {
484 if (hdr->reason_code == ZFCP_CT_UNABLE_TO_PERFORM_CMD)
485 return -EAGAIN; /* might be a temporary condition */
486 return -EIO;
487 }
488
489 if (hdr->max_res_size)
490 return -E2BIG;
491
492 down(&zfcp_data.config_sema);
493
494 /* first entry is the header */
495 for (x = 1; x < ZFCP_GPN_FT_MAX_ENTRIES; x++) {
496 if (x % (ZFCP_GPN_FT_ENTRIES + 1))
497 acc++;
498 else
499 acc = sg_virt(++sg);
500
501 d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 |
502 acc->port_id[2];
503
504 /* skip the adapter's port and known remote ports */
505 if (acc->wwpn == fc_host_port_name(adapter->scsi_host) ||
506 zfcp_get_port_by_did(adapter, d_id))
507 continue;
508
509 port = zfcp_port_enqueue(adapter, acc->wwpn,
510 ZFCP_STATUS_PORT_DID_DID |
511 ZFCP_STATUS_COMMON_NOESC, d_id);
512 if (IS_ERR(port))
513 ret = PTR_ERR(port);
514 else
515 zfcp_erp_port_reopen(port, 0, 149, NULL);
516 if (acc->control & 0x80) /* last entry */
517 break;
518 }
519
520 zfcp_erp_wait(adapter);
521 list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list)
522 zfcp_validate_port(port);
523 up(&zfcp_data.config_sema);
524 return ret;
525}
526
527/**
528 * zfcp_scan_ports - scan remote ports and attach new ports
529 * @adapter: pointer to struct zfcp_adapter
530 */
531int zfcp_scan_ports(struct zfcp_adapter *adapter)
532{
533 int ret, i;
534 struct zfcp_gpn_ft *gpn_ft;
535
536 zfcp_erp_wait(adapter); /* wait until adapter is finished with ERP */
537 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
538 return 0;
539
540 ret = zfcp_scan_get_nameserver(adapter);
541 if (ret)
542 return ret;
543
544 gpn_ft = zfcp_alloc_sg_env();
545 if (!gpn_ft)
546 return -ENOMEM;
547
548 for (i = 0; i < 3; i++) {
549 ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter);
550 if (!ret) {
551 ret = zfcp_scan_eval_gpn_ft(gpn_ft);
552 if (ret == -EAGAIN)
553 ssleep(1);
554 else
555 break;
556 }
557 }
558 zfcp_free_sg_env(gpn_ft);
559
560 return ret;
561}
562
563
564void _zfcp_scan_ports_later(struct work_struct *work)
565{
566 zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
567}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index b2ea4ea051f5..19c1ca913874 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1,54 +1,37 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * Implementation of FSF commands.
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22#include "zfcp_ext.h" 9#include "zfcp_ext.h"
23 10
24static int zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *); 11static void zfcp_fsf_request_timeout_handler(unsigned long data)
25static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *); 12{
26static int zfcp_fsf_open_port_handler(struct zfcp_fsf_req *); 13 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
27static int zfcp_fsf_close_port_handler(struct zfcp_fsf_req *); 14 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62,
28static int zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *); 15 NULL);
29static int zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *); 16}
30static int zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *); 17
31static int zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *); 18static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
32static int zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *); 19 unsigned long timeout)
33static int zfcp_fsf_send_fcp_command_task_management_handler( 20{
34 struct zfcp_fsf_req *); 21 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
35static int zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *); 22 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
36static int zfcp_fsf_status_read_handler(struct zfcp_fsf_req *); 23 fsf_req->timer.expires = jiffies + timeout;
37static int zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *); 24 add_timer(&fsf_req->timer);
38static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *); 25}
39static int zfcp_fsf_control_file_handler(struct zfcp_fsf_req *); 26
40static inline int zfcp_fsf_req_sbal_check( 27static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
41 unsigned long *, struct zfcp_qdio_queue *, int); 28{
42static inline int zfcp_use_one_sbal( 29 BUG_ON(!fsf_req->erp_action);
43 struct scatterlist *, int, struct scatterlist *, int); 30 fsf_req->timer.function = zfcp_erp_timeout_handler;
44static struct zfcp_fsf_req *zfcp_fsf_req_alloc(mempool_t *, int); 31 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
45static int zfcp_fsf_req_send(struct zfcp_fsf_req *); 32 fsf_req->timer.expires = jiffies + 30 * HZ;
46static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *); 33 add_timer(&fsf_req->timer);
47static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *); 34}
48static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
49static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *, u8,
50 struct fsf_link_down_info *);
51static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
52 35
53/* association between FSF command and FSF QTCB type */ 36/* association between FSF command and FSF QTCB type */
54static u32 fsf_qtcb_type[] = { 37static u32 fsf_qtcb_type[] = {
@@ -67,96 +50,77 @@ static u32 fsf_qtcb_type[] = {
67 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND 50 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
68}; 51};
69 52
70static const char zfcp_act_subtable_type[5][8] = { 53static const char *zfcp_act_subtable_type[] = {
71 "unknown", "OS", "WWPN", "DID", "LUN" 54 "unknown", "OS", "WWPN", "DID", "LUN"
72}; 55};
73 56
74/****************************************************************/ 57static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
75/*************** FSF related Functions *************************/
76/****************************************************************/
77
78#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
79
80/*
81 * function: zfcp_fsf_req_alloc
82 *
83 * purpose: Obtains an fsf_req and potentially a qtcb (for all but
84 * unsolicited requests) via helper functions
85 * Does some initial fsf request set-up.
86 *
87 * returns: pointer to allocated fsf_req if successfull
88 * NULL otherwise
89 *
90 * locks: none
91 *
92 */
93static struct zfcp_fsf_req *
94zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
95{ 58{
96 size_t size; 59 u16 subtable = table >> 16;
97 void *ptr; 60 u16 rule = table & 0xffff;
98 struct zfcp_fsf_req *fsf_req = NULL;
99 61
100 if (req_flags & ZFCP_REQ_NO_QTCB) 62 if (subtable && subtable < ARRAY_SIZE(zfcp_act_subtable_type))
101 size = sizeof(struct zfcp_fsf_req); 63 dev_warn(&adapter->ccw_device->dev,
102 else 64 "Access denied in subtable %s, rule %d.\n",
103 size = sizeof(struct zfcp_fsf_req_qtcb); 65 zfcp_act_subtable_type[subtable], rule);
104 66}
105 if (likely(pool))
106 ptr = mempool_alloc(pool, GFP_ATOMIC);
107 else {
108 if (req_flags & ZFCP_REQ_NO_QTCB)
109 ptr = kmalloc(size, GFP_ATOMIC);
110 else
111 ptr = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
112 GFP_ATOMIC);
113 }
114
115 if (unlikely(!ptr))
116 goto out;
117
118 memset(ptr, 0, size);
119 67
120 if (req_flags & ZFCP_REQ_NO_QTCB) { 68static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
121 fsf_req = (struct zfcp_fsf_req *) ptr; 69 struct zfcp_port *port)
122 } else { 70{
123 fsf_req = &((struct zfcp_fsf_req_qtcb *) ptr)->fsf_req; 71 struct fsf_qtcb_header *header = &req->qtcb->header;
124 fsf_req->qtcb = &((struct zfcp_fsf_req_qtcb *) ptr)->qtcb; 72 dev_warn(&req->adapter->ccw_device->dev,
125 } 73 "Access denied, cannot send command to port 0x%016Lx.\n",
74 port->wwpn);
75 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
76 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
77 zfcp_erp_port_access_denied(port, 55, req);
78 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
79}
126 80
127 fsf_req->pool = pool; 81static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
82 struct zfcp_unit *unit)
83{
84 struct fsf_qtcb_header *header = &req->qtcb->header;
85 dev_warn(&req->adapter->ccw_device->dev,
86 "Access denied for unit 0x%016Lx on port 0x%016Lx.\n",
87 unit->fcp_lun, unit->port->wwpn);
88 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
89 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
90 zfcp_erp_unit_access_denied(unit, 59, req);
91 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
92}
128 93
129 out: 94static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
130 return fsf_req; 95{
96 dev_err(&req->adapter->ccw_device->dev,
97 "Required FC class not supported by adapter, "
98 "shutting down adapter.\n");
99 zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req);
100 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
131} 101}
132 102
133/* 103/**
134 * function: zfcp_fsf_req_free 104 * zfcp_fsf_req_free - free memory used by fsf request
135 * 105 * @fsf_req: pointer to struct zfcp_fsf_req
136 * purpose: Frees the memory of an fsf_req (and potentially a qtcb) or
137 * returns it into the pool via helper functions.
138 *
139 * returns: sod all
140 *
141 * locks: none
142 */ 106 */
143void 107void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
144zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
145{ 108{
146 if (likely(fsf_req->pool)) { 109 if (likely(req->pool)) {
147 mempool_free(fsf_req, fsf_req->pool); 110 mempool_free(req, req->pool);
148 return; 111 return;
149 } 112 }
150 113
151 if (fsf_req->qtcb) { 114 if (req->qtcb) {
152 kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, fsf_req); 115 kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, req);
153 return; 116 return;
154 } 117 }
155
156 kfree(fsf_req);
157} 118}
158 119
159/* 120/**
121 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
122 * @adapter: pointer to struct zfcp_adapter
123 *
160 * Never ever call this without shutting down the adapter first. 124 * Never ever call this without shutting down the adapter first.
161 * Otherwise the adapter would continue using and corrupting s390 storage. 125 * Otherwise the adapter would continue using and corrupting s390 storage.
162 * Included BUG_ON() call to ensure this is done. 126 * Included BUG_ON() call to ensure this is done.
@@ -164,2353 +128,1359 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
164 */ 128 */
165void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) 129void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
166{ 130{
167 struct zfcp_fsf_req *fsf_req, *tmp; 131 struct zfcp_fsf_req *req, *tmp;
168 unsigned long flags; 132 unsigned long flags;
169 LIST_HEAD(remove_queue); 133 LIST_HEAD(remove_queue);
170 unsigned int i; 134 unsigned int i;
171 135
172 BUG_ON(atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)); 136 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
173 spin_lock_irqsave(&adapter->req_list_lock, flags); 137 spin_lock_irqsave(&adapter->req_list_lock, flags);
174 atomic_set(&adapter->reqs_active, 0);
175 for (i = 0; i < REQUEST_LIST_SIZE; i++) 138 for (i = 0; i < REQUEST_LIST_SIZE; i++)
176 list_splice_init(&adapter->req_list[i], &remove_queue); 139 list_splice_init(&adapter->req_list[i], &remove_queue);
177 spin_unlock_irqrestore(&adapter->req_list_lock, flags); 140 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
178 141
179 list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) { 142 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
180 list_del(&fsf_req->list); 143 list_del(&req->list);
181 fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 144 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
182 zfcp_fsf_req_complete(fsf_req); 145 zfcp_fsf_req_complete(req);
183 } 146 }
184} 147}
185 148
186/* 149static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
187 * function: zfcp_fsf_req_complete
188 *
189 * purpose: Updates active counts and timers for openfcp-reqs
190 * May cleanup request after req_eval returns
191 *
192 * returns: 0 - success
193 * !0 - failure
194 *
195 * context:
196 */
197int
198zfcp_fsf_req_complete(struct zfcp_fsf_req *fsf_req)
199{ 150{
200 int retval = 0; 151 struct fsf_status_read_buffer *sr_buf = req->data;
201 int cleanup; 152 struct zfcp_adapter *adapter = req->adapter;
202 153 struct zfcp_port *port;
203 if (unlikely(fsf_req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) { 154 int d_id = sr_buf->d_id & ZFCP_DID_MASK;
204 ZFCP_LOG_DEBUG("Status read response received\n"); 155 unsigned long flags;
205 /*
206 * Note: all cleanup handling is done in the callchain of
207 * the function call-chain below.
208 */
209 zfcp_fsf_status_read_handler(fsf_req);
210 goto out;
211 } else {
212 del_timer(&fsf_req->timer);
213 zfcp_fsf_protstatus_eval(fsf_req);
214 }
215
216 /*
217 * fsf_req may be deleted due to waking up functions, so
218 * cleanup is saved here and used later
219 */
220 if (likely(fsf_req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
221 cleanup = 1;
222 else
223 cleanup = 0;
224
225 fsf_req->status |= ZFCP_STATUS_FSFREQ_COMPLETED;
226 156
227 /* cleanup request if requested by initiator */ 157 read_lock_irqsave(&zfcp_data.config_lock, flags);
228 if (likely(cleanup)) { 158 list_for_each_entry(port, &adapter->port_list_head, list)
229 ZFCP_LOG_TRACE("removing FSF request %p\n", fsf_req); 159 if (port->d_id == d_id) {
230 /* 160 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
231 * lock must not be held here since it will be 161 switch (sr_buf->status_subtype) {
232 * grabed by the called routine, too 162 case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT:
233 */ 163 zfcp_erp_port_reopen(port, 0, 101, req);
234 zfcp_fsf_req_free(fsf_req); 164 break;
235 } else { 165 case FSF_STATUS_READ_SUB_ERROR_PORT:
236 /* notify initiator waiting for the requests completion */ 166 zfcp_erp_port_shutdown(port, 0, 122, req);
237 ZFCP_LOG_TRACE("waking initiator of FSF request %p\n",fsf_req); 167 break;
238 /* 168 }
239 * FIXME: Race! We must not access fsf_req here as it might have been 169 return;
240 * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED 170 }
241 * flag. It's an improbable case. But, we have the same paranoia for 171 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
242 * the cleanup flag already. 172}
243 * Might better be handled using complete()?
244 * (setting the flag and doing wakeup ought to be atomic
245 * with regard to checking the flag as long as waitqueue is
246 * part of the to be released structure)
247 */
248 wake_up(&fsf_req->completion_wq);
249 }
250 173
251 out: 174static void zfcp_fsf_bit_error_threshold(struct zfcp_fsf_req *req)
252 return retval; 175{
176 struct zfcp_adapter *adapter = req->adapter;
177 struct fsf_status_read_buffer *sr_buf = req->data;
178 struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
179
180 dev_warn(&adapter->ccw_device->dev,
181 "Warning: bit error threshold data "
182 "received for the adapter: "
183 "link failures = %i, loss of sync errors = %i, "
184 "loss of signal errors = %i, "
185 "primitive sequence errors = %i, "
186 "invalid transmission word errors = %i, "
187 "CRC errors = %i).\n",
188 err->link_failure_error_count,
189 err->loss_of_sync_error_count,
190 err->loss_of_signal_error_count,
191 err->primitive_sequence_error_count,
192 err->invalid_transmission_word_error_count,
193 err->crc_error_count);
194 dev_warn(&adapter->ccw_device->dev,
195 "Additional bit error threshold data of the adapter: "
196 "primitive sequence event time-outs = %i, "
197 "elastic buffer overrun errors = %i, "
198 "advertised receive buffer-to-buffer credit = %i, "
199 "current receice buffer-to-buffer credit = %i, "
200 "advertised transmit buffer-to-buffer credit = %i, "
201 "current transmit buffer-to-buffer credit = %i).\n",
202 err->primitive_sequence_event_timeout_count,
203 err->elastic_buffer_overrun_error_count,
204 err->advertised_receive_b2b_credit,
205 err->current_receive_b2b_credit,
206 err->advertised_transmit_b2b_credit,
207 err->current_transmit_b2b_credit);
253} 208}
254 209
255/* 210static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
256 * function: zfcp_fsf_protstatus_eval 211 struct fsf_link_down_info *link_down)
257 *
258 * purpose: evaluates the QTCB of the finished FSF request
259 * and initiates appropriate actions
260 * (usually calling FSF command specific handlers)
261 *
262 * returns:
263 *
264 * context:
265 *
266 * locks:
267 */
268static int
269zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
270{ 212{
271 int retval = 0; 213 struct zfcp_adapter *adapter = req->adapter;
272 struct zfcp_adapter *adapter = fsf_req->adapter;
273 struct fsf_qtcb *qtcb = fsf_req->qtcb;
274 union fsf_prot_status_qual *prot_status_qual =
275 &qtcb->prefix.prot_status_qual;
276
277 zfcp_hba_dbf_event_fsf_response(fsf_req);
278
279 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
280 ZFCP_LOG_DEBUG("fsf_req 0x%lx has been dismissed\n",
281 (unsigned long) fsf_req);
282 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
283 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
284 goto skip_protstatus;
285 }
286 214
287 /* evaluate FSF Protocol Status */ 215 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
288 switch (qtcb->prefix.prot_status) { 216 return;
289 217
290 case FSF_PROT_GOOD: 218 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
291 case FSF_PROT_FSF_STATUS_PRESENTED:
292 break;
293 219
294 case FSF_PROT_QTCB_VERSION_ERROR: 220 if (!link_down)
295 ZFCP_LOG_NORMAL("error: The adapter %s contains " 221 goto out;
296 "microcode of version 0x%x, the device driver "
297 "only supports 0x%x. Aborting.\n",
298 zfcp_get_busid_by_adapter(adapter),
299 prot_status_qual->version_error.fsf_version,
300 ZFCP_QTCB_VERSION);
301 zfcp_erp_adapter_shutdown(adapter, 0, 117, fsf_req);
302 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
303 break;
304 222
305 case FSF_PROT_SEQ_NUMB_ERROR: 223 switch (link_down->error_code) {
306 ZFCP_LOG_NORMAL("bug: Sequence number mismatch between " 224 case FSF_PSQ_LINK_NO_LIGHT:
307 "driver (0x%x) and adapter %s (0x%x). " 225 dev_warn(&req->adapter->ccw_device->dev,
308 "Restarting all operations on this adapter.\n", 226 "The local link is down: no light detected.\n");
309 qtcb->prefix.req_seq_no,
310 zfcp_get_busid_by_adapter(adapter),
311 prot_status_qual->sequence_error.exp_req_seq_no);
312 zfcp_erp_adapter_reopen(adapter, 0, 98, fsf_req);
313 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
314 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
315 break; 227 break;
316 228 case FSF_PSQ_LINK_WRAP_PLUG:
317 case FSF_PROT_UNSUPP_QTCB_TYPE: 229 dev_warn(&req->adapter->ccw_device->dev,
318 ZFCP_LOG_NORMAL("error: Packet header type used by the " 230 "The local link is down: wrap plug detected.\n");
319 "device driver is incompatible with "
320 "that used on adapter %s. "
321 "Stopping all operations on this adapter.\n",
322 zfcp_get_busid_by_adapter(adapter));
323 zfcp_erp_adapter_shutdown(adapter, 0, 118, fsf_req);
324 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
325 break; 231 break;
326 232 case FSF_PSQ_LINK_NO_FCP:
327 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 233 dev_warn(&req->adapter->ccw_device->dev,
328 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 234 "The local link is down: "
329 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 235 "adjacent node on link does not support FCP.\n");
330 &(adapter->status));
331 break; 236 break;
332 237 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
333 case FSF_PROT_DUPLICATE_REQUEST_ID: 238 dev_warn(&req->adapter->ccw_device->dev,
334 ZFCP_LOG_NORMAL("bug: The request identifier 0x%Lx " 239 "The local link is down: "
335 "to the adapter %s is ambiguous. " 240 "firmware update in progress.\n");
336 "Stopping all operations on this adapter.\n",
337 *(unsigned long long*)
338 (&qtcb->bottom.support.req_handle),
339 zfcp_get_busid_by_adapter(adapter));
340 zfcp_erp_adapter_shutdown(adapter, 0, 78, fsf_req);
341 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
342 break; 241 break;
343 242 case FSF_PSQ_LINK_INVALID_WWPN:
344 case FSF_PROT_LINK_DOWN: 243 dev_warn(&req->adapter->ccw_device->dev,
345 zfcp_fsf_link_down_info_eval(fsf_req, 37, 244 "The local link is down: "
346 &prot_status_qual->link_down_info); 245 "duplicate or invalid WWPN detected.\n");
347 /* FIXME: reopening adapter now? better wait for link up */
348 zfcp_erp_adapter_reopen(adapter, 0, 79, fsf_req);
349 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
350 break; 246 break;
351 247 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
352 case FSF_PROT_REEST_QUEUE: 248 dev_warn(&req->adapter->ccw_device->dev,
353 ZFCP_LOG_NORMAL("The local link to adapter with " 249 "The local link is down: "
354 "%s was re-plugged. " 250 "no support for NPIV by Fabric.\n");
355 "Re-starting operations on this adapter.\n",
356 zfcp_get_busid_by_adapter(adapter));
357 /* All ports should be marked as ready to run again */
358 zfcp_erp_modify_adapter_status(adapter, 28, NULL,
359 ZFCP_STATUS_COMMON_RUNNING,
360 ZFCP_SET);
361 zfcp_erp_adapter_reopen(adapter,
362 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
363 | ZFCP_STATUS_COMMON_ERP_FAILED,
364 99, fsf_req);
365 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
366 break; 251 break;
367 252 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
368 case FSF_PROT_ERROR_STATE: 253 dev_warn(&req->adapter->ccw_device->dev,
369 ZFCP_LOG_NORMAL("error: The adapter %s " 254 "The local link is down: "
370 "has entered the error state. " 255 "out of resource in FCP daughtercard.\n");
371 "Restarting all operations on this " 256 break;
372 "adapter.\n", 257 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
373 zfcp_get_busid_by_adapter(adapter)); 258 dev_warn(&req->adapter->ccw_device->dev,
374 zfcp_erp_adapter_reopen(adapter, 0, 100, fsf_req); 259 "The local link is down: "
375 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY; 260 "out of resource in Fabric.\n");
376 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 261 break;
262 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
263 dev_warn(&req->adapter->ccw_device->dev,
264 "The local link is down: "
265 "unable to login to Fabric.\n");
266 break;
267 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
268 dev_warn(&req->adapter->ccw_device->dev,
269 "WWPN assignment file corrupted on adapter.\n");
270 break;
271 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
272 dev_warn(&req->adapter->ccw_device->dev,
273 "Mode table corrupted on adapter.\n");
274 break;
275 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
276 dev_warn(&req->adapter->ccw_device->dev,
277 "No WWPN for assignment table on adapter.\n");
377 break; 278 break;
378
379 default: 279 default:
380 ZFCP_LOG_NORMAL("bug: Transfer protocol status information " 280 dev_warn(&req->adapter->ccw_device->dev,
381 "provided by the adapter %s " 281 "The local link to adapter is down.\n");
382 "is not compatible with the device driver. "
383 "Stopping all operations on this adapter. "
384 "(debug info 0x%x).\n",
385 zfcp_get_busid_by_adapter(adapter),
386 qtcb->prefix.prot_status);
387 zfcp_erp_adapter_shutdown(adapter, 0, 119, fsf_req);
388 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
389 } 282 }
283out:
284 zfcp_erp_adapter_failed(adapter, id, req);
285}
390 286
391 skip_protstatus: 287static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
392 /* 288{
393 * always call specific handlers to give them a chance to do 289 struct zfcp_adapter *adapter = req->adapter;
394 * something meaningful even in error cases 290 struct fsf_status_read_buffer *sr_buf = req->data;
395 */ 291 struct fsf_link_down_info *ldi =
396 zfcp_fsf_fsfstatus_eval(fsf_req); 292 (struct fsf_link_down_info *) &sr_buf->payload;
397 return retval; 293
294 switch (sr_buf->status_subtype) {
295 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
296 dev_warn(&adapter->ccw_device->dev,
297 "Physical link is down.\n");
298 zfcp_fsf_link_down_info_eval(req, 38, ldi);
299 break;
300 case FSF_STATUS_READ_SUB_FDISC_FAILED:
301 dev_warn(&adapter->ccw_device->dev,
302 "Local link is down "
303 "due to failed FDISC login.\n");
304 zfcp_fsf_link_down_info_eval(req, 39, ldi);
305 break;
306 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
307 dev_warn(&adapter->ccw_device->dev,
308 "Local link is down "
309 "due to firmware update on adapter.\n");
310 zfcp_fsf_link_down_info_eval(req, 40, NULL);
311 };
398} 312}
399 313
400/* 314static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
401 * function: zfcp_fsf_fsfstatus_eval
402 *
403 * purpose: evaluates FSF status of completed FSF request
404 * and acts accordingly
405 *
406 * returns:
407 */
408static int
409zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *fsf_req)
410{ 315{
411 int retval = 0; 316 struct zfcp_adapter *adapter = req->adapter;
317 struct fsf_status_read_buffer *sr_buf = req->data;
412 318
413 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 319 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
414 goto skip_fsfstatus; 320 zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf);
321 mempool_free(sr_buf, adapter->pool.data_status_read);
322 zfcp_fsf_req_free(req);
323 return;
415 } 324 }
416 325
417 /* evaluate FSF Status */ 326 zfcp_hba_dbf_event_fsf_unsol("read", adapter, sr_buf);
418 switch (fsf_req->qtcb->header.fsf_status) {
419 case FSF_UNKNOWN_COMMAND:
420 ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
421 "not known by the adapter %s "
422 "Stopping all operations on this adapter. "
423 "(debug info 0x%x).\n",
424 zfcp_get_busid_by_adapter(fsf_req->adapter),
425 fsf_req->qtcb->header.fsf_command);
426 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0, 120, fsf_req);
427 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
428 break;
429 327
430 case FSF_FCP_RSP_AVAILABLE: 328 switch (sr_buf->status_type) {
431 ZFCP_LOG_DEBUG("FCP Sense data will be presented to the " 329 case FSF_STATUS_READ_PORT_CLOSED:
432 "SCSI stack.\n"); 330 zfcp_fsf_status_read_port_closed(req);
433 break; 331 break;
434 332 case FSF_STATUS_READ_INCOMING_ELS:
435 case FSF_ADAPTER_STATUS_AVAILABLE: 333 zfcp_fc_incoming_els(req);
436 zfcp_fsf_fsfstatus_qual_eval(fsf_req); 334 break;
335 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
336 break;
337 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
338 zfcp_fsf_bit_error_threshold(req);
339 break;
340 case FSF_STATUS_READ_LINK_DOWN:
341 zfcp_fsf_status_read_link_down(req);
342 break;
343 case FSF_STATUS_READ_LINK_UP:
344 dev_info(&adapter->ccw_device->dev,
345 "Local link was replugged.\n");
346 /* All ports should be marked as ready to run again */
347 zfcp_erp_modify_adapter_status(adapter, 30, NULL,
348 ZFCP_STATUS_COMMON_RUNNING,
349 ZFCP_SET);
350 zfcp_erp_adapter_reopen(adapter,
351 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
352 ZFCP_STATUS_COMMON_ERP_FAILED,
353 102, req);
354 break;
355 case FSF_STATUS_READ_NOTIFICATION_LOST:
356 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
357 zfcp_erp_adapter_access_changed(adapter, 135, req);
358 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
359 schedule_work(&adapter->scan_work);
360 break;
361 case FSF_STATUS_READ_CFDC_UPDATED:
362 zfcp_erp_adapter_access_changed(adapter, 136, req);
363 break;
364 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
365 adapter->adapter_features = sr_buf->payload.word[0];
437 break; 366 break;
438 } 367 }
439 368
440 skip_fsfstatus: 369 mempool_free(sr_buf, adapter->pool.data_status_read);
441 /* 370 zfcp_fsf_req_free(req);
442 * always call specific handlers to give them a chance to do
443 * something meaningful even in error cases
444 */
445 zfcp_fsf_req_dispatch(fsf_req);
446 371
447 return retval; 372 atomic_inc(&adapter->stat_miss);
373 schedule_work(&adapter->stat_work);
448} 374}
449 375
450/* 376static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
451 * function: zfcp_fsf_fsfstatus_qual_eval
452 *
453 * purpose: evaluates FSF status-qualifier of completed FSF request
454 * and acts accordingly
455 *
456 * returns:
457 */
458static int
459zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
460{ 377{
461 int retval = 0; 378 switch (req->qtcb->header.fsf_status_qual.word[0]) {
462
463 switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
464 case FSF_SQ_FCP_RSP_AVAILABLE: 379 case FSF_SQ_FCP_RSP_AVAILABLE:
465 break;
466 case FSF_SQ_RETRY_IF_POSSIBLE:
467 /* The SCSI-stack may now issue retries or escalate */
468 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
469 break;
470 case FSF_SQ_COMMAND_ABORTED:
471 /* Carry the aborted state on to upper layer */
472 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
473 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
474 break;
475 case FSF_SQ_NO_RECOM:
476 ZFCP_LOG_NORMAL("bug: No recommendation could be given for a "
477 "problem on the adapter %s "
478 "Stopping all operations on this adapter. ",
479 zfcp_get_busid_by_adapter(fsf_req->adapter));
480 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0, 121, fsf_req);
481 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
482 break;
483 case FSF_SQ_ULP_PROGRAMMING_ERROR:
484 ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer "
485 "(adapter %s)\n",
486 zfcp_get_busid_by_adapter(fsf_req->adapter));
487 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
488 break;
489 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 380 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
490 case FSF_SQ_NO_RETRY_POSSIBLE: 381 case FSF_SQ_NO_RETRY_POSSIBLE:
491 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 382 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
492 /* dealt with in the respective functions */ 383 return;
384 case FSF_SQ_COMMAND_ABORTED:
385 req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
493 break; 386 break;
494 default: 387 case FSF_SQ_NO_RECOM:
495 ZFCP_LOG_NORMAL("bug: Additional status info could " 388 dev_err(&req->adapter->ccw_device->dev,
496 "not be interpreted properly.\n"); 389 "No recommendation could be given for a "
497 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, 390 "problem on the adapter.\n");
498 (char *) &fsf_req->qtcb->header.fsf_status_qual, 391 zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req);
499 sizeof (union fsf_status_qual));
500 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
501 break; 392 break;
502 } 393 }
503 394 /* all non-return stats set FSFREQ_ERROR*/
504 return retval; 395 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
505} 396}
506 397
507/** 398static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
508 * zfcp_fsf_link_down_info_eval - evaluate link down information block
509 */
510static void
511zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *fsf_req, u8 id,
512 struct fsf_link_down_info *link_down)
513{ 399{
514 struct zfcp_adapter *adapter = fsf_req->adapter; 400 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
515
516 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
517 &adapter->status))
518 return; 401 return;
519 402
520 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 403 switch (req->qtcb->header.fsf_status) {
521 404 case FSF_UNKNOWN_COMMAND:
522 if (link_down == NULL) 405 dev_err(&req->adapter->ccw_device->dev,
523 goto out; 406 "Command issued by the device driver (0x%x) is "
524 407 "not known by the adapter.\n",
525 switch (link_down->error_code) { 408 req->qtcb->header.fsf_command);
526 case FSF_PSQ_LINK_NO_LIGHT: 409 zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req);
527 ZFCP_LOG_NORMAL("The local link to adapter %s is down " 410 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
528 "(no light detected)\n",
529 zfcp_get_busid_by_adapter(adapter));
530 break;
531 case FSF_PSQ_LINK_WRAP_PLUG:
532 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
533 "(wrap plug detected)\n",
534 zfcp_get_busid_by_adapter(adapter));
535 break;
536 case FSF_PSQ_LINK_NO_FCP:
537 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
538 "(adjacent node on link does not support FCP)\n",
539 zfcp_get_busid_by_adapter(adapter));
540 break; 411 break;
541 case FSF_PSQ_LINK_FIRMWARE_UPDATE: 412 case FSF_ADAPTER_STATUS_AVAILABLE:
542 ZFCP_LOG_NORMAL("The local link to adapter %s is down " 413 zfcp_fsf_fsfstatus_qual_eval(req);
543 "(firmware update in progress)\n",
544 zfcp_get_busid_by_adapter(adapter));
545 break;
546 case FSF_PSQ_LINK_INVALID_WWPN:
547 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
548 "(duplicate or invalid WWPN detected)\n",
549 zfcp_get_busid_by_adapter(adapter));
550 break; 414 break;
551 case FSF_PSQ_LINK_NO_NPIV_SUPPORT: 415 }
552 ZFCP_LOG_NORMAL("The local link to adapter %s is down " 416}
553 "(no support for NPIV by Fabric)\n", 417
554 zfcp_get_busid_by_adapter(adapter)); 418static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
419{
420 struct zfcp_adapter *adapter = req->adapter;
421 struct fsf_qtcb *qtcb = req->qtcb;
422 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
423
424 zfcp_hba_dbf_event_fsf_response(req);
425
426 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
427 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
428 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
429 return;
430 }
431
432 switch (qtcb->prefix.prot_status) {
433 case FSF_PROT_GOOD:
434 case FSF_PROT_FSF_STATUS_PRESENTED:
435 return;
436 case FSF_PROT_QTCB_VERSION_ERROR:
437 dev_err(&adapter->ccw_device->dev,
438 "The QTCB version requested by zfcp (0x%x) is not "
439 "supported by the FCP adapter (lowest supported "
440 "0x%x, highest supported 0x%x).\n",
441 FSF_QTCB_CURRENT_VERSION, psq->word[0],
442 psq->word[1]);
443 zfcp_erp_adapter_shutdown(adapter, 0, 117, req);
555 break; 444 break;
556 case FSF_PSQ_LINK_NO_FCP_RESOURCES: 445 case FSF_PROT_ERROR_STATE:
557 ZFCP_LOG_NORMAL("The local link to adapter %s is down " 446 case FSF_PROT_SEQ_NUMB_ERROR:
558 "(out of resource in FCP daughtercard)\n", 447 zfcp_erp_adapter_reopen(adapter, 0, 98, req);
559 zfcp_get_busid_by_adapter(adapter)); 448 req->status |= ZFCP_STATUS_FSFREQ_RETRY;
560 break; 449 break;
561 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES: 450 case FSF_PROT_UNSUPP_QTCB_TYPE:
562 ZFCP_LOG_NORMAL("The local link to adapter %s is down " 451 dev_err(&adapter->ccw_device->dev,
563 "(out of resource in Fabric)\n", 452 "Packet header type used by the device driver is "
564 zfcp_get_busid_by_adapter(adapter)); 453 "incompatible with that used on the adapter.\n");
454 zfcp_erp_adapter_shutdown(adapter, 0, 118, req);
565 break; 455 break;
566 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE: 456 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
567 ZFCP_LOG_NORMAL("The local link to adapter %s is down " 457 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
568 "(unable to Fabric login)\n", 458 &adapter->status);
569 zfcp_get_busid_by_adapter(adapter));
570 break; 459 break;
571 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED: 460 case FSF_PROT_DUPLICATE_REQUEST_ID:
572 ZFCP_LOG_NORMAL("WWPN assignment file corrupted on adapter %s\n", 461 dev_err(&adapter->ccw_device->dev,
573 zfcp_get_busid_by_adapter(adapter)); 462 "The request identifier 0x%Lx is ambiguous.\n",
463 (unsigned long long)qtcb->bottom.support.req_handle);
464 zfcp_erp_adapter_shutdown(adapter, 0, 78, req);
574 break; 465 break;
575 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED: 466 case FSF_PROT_LINK_DOWN:
576 ZFCP_LOG_NORMAL("Mode table corrupted on adapter %s\n", 467 zfcp_fsf_link_down_info_eval(req, 37, &psq->link_down_info);
577 zfcp_get_busid_by_adapter(adapter)); 468 /* FIXME: reopening adapter now? better wait for link up */
469 zfcp_erp_adapter_reopen(adapter, 0, 79, req);
578 break; 470 break;
579 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT: 471 case FSF_PROT_REEST_QUEUE:
580 ZFCP_LOG_NORMAL("No WWPN for assignment table on adapter %s\n", 472 /* All ports should be marked as ready to run again */
581 zfcp_get_busid_by_adapter(adapter)); 473 zfcp_erp_modify_adapter_status(adapter, 28, NULL,
474 ZFCP_STATUS_COMMON_RUNNING,
475 ZFCP_SET);
476 zfcp_erp_adapter_reopen(adapter,
477 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
478 ZFCP_STATUS_COMMON_ERP_FAILED, 99, req);
582 break; 479 break;
583 default: 480 default:
584 ZFCP_LOG_NORMAL("The local link to adapter %s is down " 481 dev_err(&adapter->ccw_device->dev,
585 "(warning: unknown reason code %d)\n", 482 "Transfer protocol status information"
586 zfcp_get_busid_by_adapter(adapter), 483 "provided by the adapter (0x%x) "
587 link_down->error_code); 484 "is not compatible with the device driver.\n",
588 } 485 qtcb->prefix.prot_status);
589 486 zfcp_erp_adapter_shutdown(adapter, 0, 119, req);
590 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) 487 }
591 ZFCP_LOG_DEBUG("Debug information to link down: " 488 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
592 "primary_status=0x%02x "
593 "ioerr_code=0x%02x "
594 "action_code=0x%02x "
595 "reason_code=0x%02x "
596 "explanation_code=0x%02x "
597 "vendor_specific_code=0x%02x\n",
598 link_down->primary_status,
599 link_down->ioerr_code,
600 link_down->action_code,
601 link_down->reason_code,
602 link_down->explanation_code,
603 link_down->vendor_specific_code);
604
605 out:
606 zfcp_erp_adapter_failed(adapter, id, fsf_req);
607} 489}
608 490
609/* 491/**
610 * function: zfcp_fsf_req_dispatch 492 * zfcp_fsf_req_complete - process completion of a FSF request
611 * 493 * @fsf_req: The FSF request that has been completed.
612 * purpose: calls the appropriate command specific handler
613 * 494 *
614 * returns: 495 * When a request has been completed either from the FCP adapter,
496 * or it has been dismissed due to a queue shutdown, this function
497 * is called to process the completion status and trigger further
498 * events related to the FSF request.
615 */ 499 */
616static int 500void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
617zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
618{ 501{
619 struct zfcp_erp_action *erp_action = fsf_req->erp_action; 502 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
620 struct zfcp_adapter *adapter = fsf_req->adapter; 503 zfcp_fsf_status_read_handler(req);
621 int retval = 0; 504 return;
505 }
622 506
507 del_timer(&req->timer);
508 zfcp_fsf_protstatus_eval(req);
509 zfcp_fsf_fsfstatus_eval(req);
510 req->handler(req);
623 511
624 switch (fsf_req->fsf_command) { 512 if (req->erp_action)
513 zfcp_erp_notify(req->erp_action, 0);
514 req->status |= ZFCP_STATUS_FSFREQ_COMPLETED;
625 515
626 case FSF_QTCB_FCP_CMND: 516 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
627 zfcp_fsf_send_fcp_command_handler(fsf_req); 517 zfcp_fsf_req_free(req);
628 break; 518 else
519 /* notify initiator waiting for the requests completion */
520 /*
521 * FIXME: Race! We must not access fsf_req here as it might have been
522 * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED
523 * flag. It's an improbable case. But, we have the same paranoia for
524 * the cleanup flag already.
525 * Might better be handled using complete()?
526 * (setting the flag and doing wakeup ought to be atomic
527 * with regard to checking the flag as long as waitqueue is
528 * part of the to be released structure)
529 */
530 wake_up(&req->completion_wq);
531}
629 532
630 case FSF_QTCB_ABORT_FCP_CMND: 533static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
631 zfcp_fsf_abort_fcp_command_handler(fsf_req); 534{
632 break; 535 struct fsf_qtcb_bottom_config *bottom;
536 struct zfcp_adapter *adapter = req->adapter;
537 struct Scsi_Host *shost = adapter->scsi_host;
633 538
634 case FSF_QTCB_SEND_GENERIC: 539 bottom = &req->qtcb->bottom.config;
635 zfcp_fsf_send_ct_handler(fsf_req);
636 break;
637 540
638 case FSF_QTCB_OPEN_PORT_WITH_DID: 541 if (req->data)
639 zfcp_fsf_open_port_handler(fsf_req); 542 memcpy(req->data, bottom, sizeof(*bottom));
640 break;
641 543
642 case FSF_QTCB_OPEN_LUN: 544 fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
643 zfcp_fsf_open_unit_handler(fsf_req); 545 fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
644 break; 546 fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
547 fc_host_speed(shost) = bottom->fc_link_speed;
548 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
645 549
646 case FSF_QTCB_CLOSE_LUN: 550 adapter->hydra_version = bottom->adapter_type;
647 zfcp_fsf_close_unit_handler(fsf_req); 551 adapter->timer_ticks = bottom->timer_interval;
648 break;
649 552
650 case FSF_QTCB_CLOSE_PORT: 553 if (fc_host_permanent_port_name(shost) == -1)
651 zfcp_fsf_close_port_handler(fsf_req); 554 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
652 break;
653 555
654 case FSF_QTCB_CLOSE_PHYSICAL_PORT: 556 switch (bottom->fc_topology) {
655 zfcp_fsf_close_physical_port_handler(fsf_req); 557 case FSF_TOPO_P2P:
656 break; 558 adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
559 adapter->peer_wwpn = bottom->plogi_payload.wwpn;
560 adapter->peer_wwnn = bottom->plogi_payload.wwnn;
561 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
562 if (req->erp_action)
563 dev_info(&adapter->ccw_device->dev,
564 "Point-to-Point fibrechannel "
565 "configuration detected.\n");
566 break;
567 case FSF_TOPO_FABRIC:
568 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
569 if (req->erp_action)
570 dev_info(&adapter->ccw_device->dev,
571 "Switched fabric fibrechannel "
572 "network detected.\n");
573 break;
574 case FSF_TOPO_AL:
575 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
576 dev_err(&adapter->ccw_device->dev,
577 "Unsupported arbitrated loop fibrechannel "
578 "topology detected, shutting down "
579 "adapter.\n");
580 zfcp_erp_adapter_shutdown(adapter, 0, 127, req);
581 return -EIO;
582 default:
583 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
584 dev_err(&adapter->ccw_device->dev,
585 "The fibrechannel topology reported by the"
586 " adapter is not known by the zfcp driver,"
587 " shutting down adapter.\n");
588 zfcp_erp_adapter_shutdown(adapter, 0, 128, req);
589 return -EIO;
590 }
657 591
658 case FSF_QTCB_EXCHANGE_CONFIG_DATA: 592 return 0;
659 zfcp_fsf_exchange_config_data_handler(fsf_req); 593}
660 break;
661 594
662 case FSF_QTCB_EXCHANGE_PORT_DATA: 595static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
663 zfcp_fsf_exchange_port_data_handler(fsf_req); 596{
664 break; 597 struct zfcp_adapter *adapter = req->adapter;
598 struct fsf_qtcb *qtcb = req->qtcb;
599 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
600 struct Scsi_Host *shost = adapter->scsi_host;
665 601
666 case FSF_QTCB_SEND_ELS: 602 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
667 zfcp_fsf_send_els_handler(fsf_req); 603 return;
668 break;
669 604
670 case FSF_QTCB_DOWNLOAD_CONTROL_FILE: 605 adapter->fsf_lic_version = bottom->lic_version;
671 zfcp_fsf_control_file_handler(fsf_req); 606 adapter->adapter_features = bottom->adapter_features;
672 break; 607 adapter->connection_features = bottom->connection_features;
608 adapter->peer_wwpn = 0;
609 adapter->peer_wwnn = 0;
610 adapter->peer_d_id = 0;
673 611
674 case FSF_QTCB_UPLOAD_CONTROL_FILE: 612 switch (qtcb->header.fsf_status) {
675 zfcp_fsf_control_file_handler(fsf_req); 613 case FSF_GOOD:
614 if (zfcp_fsf_exchange_config_evaluate(req))
615 return;
616
617 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
618 dev_err(&adapter->ccw_device->dev,
619 "Maximum QTCB size (%d bytes) allowed by "
620 "the adapter is lower than the minimum "
621 "required by the driver (%ld bytes).\n",
622 bottom->max_qtcb_size,
623 sizeof(struct fsf_qtcb));
624 zfcp_erp_adapter_shutdown(adapter, 0, 129, req);
625 return;
626 }
627 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
628 &adapter->status);
676 break; 629 break;
630 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
631 fc_host_node_name(shost) = 0;
632 fc_host_port_name(shost) = 0;
633 fc_host_port_id(shost) = 0;
634 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
635 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
636 adapter->hydra_version = 0;
677 637
638 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
639 &adapter->status);
640
641 zfcp_fsf_link_down_info_eval(req, 42,
642 &qtcb->header.fsf_status_qual.link_down_info);
643 break;
678 default: 644 default:
679 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 645 zfcp_erp_adapter_shutdown(adapter, 0, 130, req);
680 ZFCP_LOG_NORMAL("bug: Command issued by the device driver is " 646 return;
681 "not supported by the adapter %s\n",
682 zfcp_get_busid_by_adapter(adapter));
683 if (fsf_req->fsf_command != fsf_req->qtcb->header.fsf_command)
684 ZFCP_LOG_NORMAL
685 ("bug: Command issued by the device driver differs "
686 "from the command returned by the adapter %s "
687 "(debug info 0x%x, 0x%x).\n",
688 zfcp_get_busid_by_adapter(adapter),
689 fsf_req->fsf_command,
690 fsf_req->qtcb->header.fsf_command);
691 } 647 }
692 648
693 if (!erp_action) 649 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
694 return retval; 650 adapter->hardware_version = bottom->hardware_version;
695 651 memcpy(fc_host_serial_number(shost), bottom->serial_number,
696 zfcp_erp_async_handler(erp_action, 0); 652 min(FC_SERIAL_NUMBER_SIZE, 17));
653 EBCASC(fc_host_serial_number(shost),
654 min(FC_SERIAL_NUMBER_SIZE, 17));
655 }
697 656
698 return retval; 657 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
658 dev_err(&adapter->ccw_device->dev,
659 "The adapter only supports newer control block "
660 "versions, try updated device driver.\n");
661 zfcp_erp_adapter_shutdown(adapter, 0, 125, req);
662 return;
663 }
664 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
665 dev_err(&adapter->ccw_device->dev,
666 "The adapter only supports older control block "
667 "versions, consider a microcode upgrade.\n");
668 zfcp_erp_adapter_shutdown(adapter, 0, 126, req);
669 }
699} 670}
700 671
701/* 672static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
702 * function: zfcp_fsf_status_read
703 *
704 * purpose: initiates a Status Read command at the specified adapter
705 *
706 * returns:
707 */
708int
709zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
710{ 673{
711 struct zfcp_fsf_req *fsf_req; 674 struct zfcp_adapter *adapter = req->adapter;
712 struct fsf_status_read_buffer *status_buffer; 675 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
713 unsigned long lock_flags; 676 struct Scsi_Host *shost = adapter->scsi_host;
714 volatile struct qdio_buffer_element *sbale;
715 int retval = 0;
716
717 /* setup new FSF request */
718 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
719 req_flags | ZFCP_REQ_NO_QTCB,
720 adapter->pool.fsf_req_status_read,
721 &lock_flags, &fsf_req);
722 if (retval < 0) {
723 ZFCP_LOG_INFO("error: Could not create unsolicited status "
724 "buffer for adapter %s.\n",
725 zfcp_get_busid_by_adapter(adapter));
726 goto failed_req_create;
727 }
728
729 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
730 sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
731 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
732 fsf_req->sbale_curr = 2;
733 677
734 status_buffer = 678 if (req->data)
735 mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC); 679 memcpy(req->data, bottom, sizeof(*bottom));
736 if (!status_buffer) {
737 ZFCP_LOG_NORMAL("bug: could not get some buffer\n");
738 goto failed_buf;
739 }
740 memset(status_buffer, 0, sizeof (struct fsf_status_read_buffer));
741 fsf_req->data = (unsigned long) status_buffer;
742 680
743 /* insert pointer to respective buffer */ 681 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
744 sbale = zfcp_qdio_sbale_curr(fsf_req); 682 fc_host_permanent_port_name(shost) = bottom->wwpn;
745 sbale->addr = (void *) status_buffer; 683 else
746 sbale->length = sizeof(struct fsf_status_read_buffer); 684 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
685 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
686 fc_host_supported_speeds(shost) = bottom->supported_speed;
687}
747 688
748 retval = zfcp_fsf_req_send(fsf_req); 689static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
749 if (retval) { 690{
750 ZFCP_LOG_DEBUG("error: Could not set-up unsolicited status " 691 struct zfcp_adapter *adapter = req->adapter;
751 "environment.\n"); 692 struct fsf_qtcb *qtcb = req->qtcb;
752 goto failed_req_send;
753 }
754 693
755 ZFCP_LOG_TRACE("Status Read request initiated (adapter%s)\n", 694 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
756 zfcp_get_busid_by_adapter(adapter)); 695 return;
757 goto out;
758 696
759 failed_req_send: 697 switch (qtcb->header.fsf_status) {
760 mempool_free(status_buffer, adapter->pool.data_status_read); 698 case FSF_GOOD:
699 zfcp_fsf_exchange_port_evaluate(req);
700 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
701 break;
702 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
703 zfcp_fsf_exchange_port_evaluate(req);
704 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
705 zfcp_fsf_link_down_info_eval(req, 43,
706 &qtcb->header.fsf_status_qual.link_down_info);
707 break;
708 }
709}
761 710
762 failed_buf: 711static int zfcp_fsf_sbal_check(struct zfcp_qdio_queue *queue)
763 zfcp_fsf_req_free(fsf_req); 712{
764 failed_req_create: 713 spin_lock(&queue->lock);
765 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); 714 if (atomic_read(&queue->count))
766 out: 715 return 1;
767 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); 716 spin_unlock(&queue->lock);
768 return retval; 717 return 0;
769} 718}
770 719
771static int 720static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
772zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
773{ 721{
774 struct fsf_status_read_buffer *status_buffer; 722 long ret;
775 struct zfcp_adapter *adapter; 723 struct zfcp_qdio_queue *req_q = &adapter->req_q;
776 struct zfcp_port *port;
777 unsigned long flags;
778 724
779 status_buffer = (struct fsf_status_read_buffer *) fsf_req->data; 725 spin_unlock(&req_q->lock);
780 adapter = fsf_req->adapter; 726 ret = wait_event_interruptible_timeout(adapter->request_wq,
727 zfcp_fsf_sbal_check(req_q), 5 * HZ);
728 if (ret > 0)
729 return 0;
781 730
782 read_lock_irqsave(&zfcp_data.config_lock, flags); 731 spin_lock(&req_q->lock);
783 list_for_each_entry(port, &adapter->port_list_head, list) 732 return -EIO;
784 if (port->d_id == (status_buffer->d_id & ZFCP_DID_MASK)) 733}
785 break;
786 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
787 734
788 if (!port || (port->d_id != (status_buffer->d_id & ZFCP_DID_MASK))) { 735static struct zfcp_fsf_req *zfcp_fsf_alloc_noqtcb(mempool_t *pool)
789 ZFCP_LOG_NORMAL("bug: Reopen port indication received for " 736{
790 "nonexisting port with d_id 0x%06x on " 737 struct zfcp_fsf_req *req;
791 "adapter %s. Ignored.\n", 738 req = mempool_alloc(pool, GFP_ATOMIC);
792 status_buffer->d_id & ZFCP_DID_MASK, 739 if (!req)
793 zfcp_get_busid_by_adapter(adapter)); 740 return NULL;
794 goto out; 741 memset(req, 0, sizeof(*req));
795 } 742 return req;
743}
796 744
797 switch (status_buffer->status_subtype) { 745static struct zfcp_fsf_req *zfcp_fsf_alloc_qtcb(mempool_t *pool)
746{
747 struct zfcp_fsf_req_qtcb *qtcb;
798 748
799 case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT: 749 if (likely(pool))
800 zfcp_erp_port_reopen(port, 0, 101, fsf_req); 750 qtcb = mempool_alloc(pool, GFP_ATOMIC);
801 break; 751 else
752 qtcb = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
753 GFP_ATOMIC);
754 if (unlikely(!qtcb))
755 return NULL;
802 756
803 case FSF_STATUS_READ_SUB_ERROR_PORT: 757 memset(qtcb, 0, sizeof(*qtcb));
804 zfcp_erp_port_shutdown(port, 0, 122, fsf_req); 758 qtcb->fsf_req.qtcb = &qtcb->qtcb;
805 break; 759 qtcb->fsf_req.pool = pool;
806 760
807 default: 761 return &qtcb->fsf_req;
808 ZFCP_LOG_NORMAL("bug: Undefined status subtype received "
809 "for a reopen indication on port with "
810 "d_id 0x%06x on the adapter %s. "
811 "Ignored. (debug info 0x%x)\n",
812 status_buffer->d_id,
813 zfcp_get_busid_by_adapter(adapter),
814 status_buffer->status_subtype);
815 }
816 out:
817 return 0;
818} 762}
819 763
820/* 764static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
821 * function: zfcp_fsf_status_read_handler 765 u32 fsf_cmd, int req_flags,
822 * 766 mempool_t *pool)
823 * purpose: is called for finished Open Port command
824 *
825 * returns:
826 */
827static int
828zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
829{ 767{
830 int retval = 0; 768 volatile struct qdio_buffer_element *sbale;
831 struct zfcp_adapter *adapter = fsf_req->adapter;
832 struct fsf_status_read_buffer *status_buffer =
833 (struct fsf_status_read_buffer *) fsf_req->data;
834 struct fsf_bit_error_payload *fsf_bit_error;
835
836 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
837 zfcp_hba_dbf_event_fsf_unsol("dism", adapter, status_buffer);
838 mempool_free(status_buffer, adapter->pool.data_status_read);
839 zfcp_fsf_req_free(fsf_req);
840 goto out;
841 }
842 769
843 zfcp_hba_dbf_event_fsf_unsol("read", adapter, status_buffer); 770 struct zfcp_fsf_req *req;
771 struct zfcp_qdio_queue *req_q = &adapter->req_q;
844 772
845 switch (status_buffer->status_type) { 773 if (req_flags & ZFCP_REQ_NO_QTCB)
774 req = zfcp_fsf_alloc_noqtcb(pool);
775 else
776 req = zfcp_fsf_alloc_qtcb(pool);
846 777
847 case FSF_STATUS_READ_PORT_CLOSED: 778 if (unlikely(!req))
848 zfcp_fsf_status_read_port_closed(fsf_req); 779 return ERR_PTR(-EIO);
849 break;
850 780
851 case FSF_STATUS_READ_INCOMING_ELS: 781 if (adapter->req_no == 0)
852 zfcp_fsf_incoming_els(fsf_req); 782 adapter->req_no++;
853 break;
854 783
855 case FSF_STATUS_READ_SENSE_DATA_AVAIL: 784 INIT_LIST_HEAD(&req->list);
856 ZFCP_LOG_INFO("unsolicited sense data received (adapter %s)\n", 785 init_timer(&req->timer);
857 zfcp_get_busid_by_adapter(adapter)); 786 init_waitqueue_head(&req->completion_wq);
858 break;
859 787
860 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: 788 req->adapter = adapter;
861 fsf_bit_error = (struct fsf_bit_error_payload *) 789 req->fsf_command = fsf_cmd;
862 status_buffer->payload; 790 req->req_id = adapter->req_no++;
863 ZFCP_LOG_NORMAL("Warning: bit error threshold data " 791 req->sbal_number = 1;
864 "received (adapter %s, " 792 req->sbal_first = req_q->first;
865 "link failures = %i, loss of sync errors = %i, " 793 req->sbal_last = req_q->first;
866 "loss of signal errors = %i, " 794 req->sbale_curr = 1;
867 "primitive sequence errors = %i, "
868 "invalid transmission word errors = %i, "
869 "CRC errors = %i)\n",
870 zfcp_get_busid_by_adapter(adapter),
871 fsf_bit_error->link_failure_error_count,
872 fsf_bit_error->loss_of_sync_error_count,
873 fsf_bit_error->loss_of_signal_error_count,
874 fsf_bit_error->primitive_sequence_error_count,
875 fsf_bit_error->invalid_transmission_word_error_count,
876 fsf_bit_error->crc_error_count);
877 ZFCP_LOG_INFO("Additional bit error threshold data "
878 "(adapter %s, "
879 "primitive sequence event time-outs = %i, "
880 "elastic buffer overrun errors = %i, "
881 "advertised receive buffer-to-buffer credit = %i, "
882 "current receice buffer-to-buffer credit = %i, "
883 "advertised transmit buffer-to-buffer credit = %i, "
884 "current transmit buffer-to-buffer credit = %i)\n",
885 zfcp_get_busid_by_adapter(adapter),
886 fsf_bit_error->primitive_sequence_event_timeout_count,
887 fsf_bit_error->elastic_buffer_overrun_error_count,
888 fsf_bit_error->advertised_receive_b2b_credit,
889 fsf_bit_error->current_receive_b2b_credit,
890 fsf_bit_error->advertised_transmit_b2b_credit,
891 fsf_bit_error->current_transmit_b2b_credit);
892 break;
893 795
894 case FSF_STATUS_READ_LINK_DOWN: 796 sbale = zfcp_qdio_sbale_req(req);
895 switch (status_buffer->status_subtype) { 797 sbale[0].addr = (void *) req->req_id;
896 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 798 sbale[0].flags |= SBAL_FLAGS0_COMMAND;
897 ZFCP_LOG_INFO("Physical link to adapter %s is down\n",
898 zfcp_get_busid_by_adapter(adapter));
899 zfcp_fsf_link_down_info_eval(fsf_req, 38,
900 (struct fsf_link_down_info *)
901 &status_buffer->payload);
902 break;
903 case FSF_STATUS_READ_SUB_FDISC_FAILED:
904 ZFCP_LOG_INFO("Local link to adapter %s is down "
905 "due to failed FDISC login\n",
906 zfcp_get_busid_by_adapter(adapter));
907 zfcp_fsf_link_down_info_eval(fsf_req, 39,
908 (struct fsf_link_down_info *)
909 &status_buffer->payload);
910 break;
911 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
912 ZFCP_LOG_INFO("Local link to adapter %s is down "
913 "due to firmware update on adapter\n",
914 zfcp_get_busid_by_adapter(adapter));
915 zfcp_fsf_link_down_info_eval(fsf_req, 40, NULL);
916 break;
917 default:
918 ZFCP_LOG_INFO("Local link to adapter %s is down "
919 "due to unknown reason\n",
920 zfcp_get_busid_by_adapter(adapter));
921 zfcp_fsf_link_down_info_eval(fsf_req, 41, NULL);
922 };
923 break;
924 799
925 case FSF_STATUS_READ_LINK_UP: 800 if (likely(req->qtcb)) {
926 ZFCP_LOG_NORMAL("Local link to adapter %s was replugged. " 801 req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no;
927 "Restarting operations on this adapter\n", 802 req->qtcb->prefix.req_id = req->req_id;
928 zfcp_get_busid_by_adapter(adapter)); 803 req->qtcb->prefix.ulp_info = 26;
929 /* All ports should be marked as ready to run again */ 804 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
930 zfcp_erp_modify_adapter_status(adapter, 30, NULL, 805 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
931 ZFCP_STATUS_COMMON_RUNNING, 806 req->qtcb->header.req_handle = req->req_id;
932 ZFCP_SET); 807 req->qtcb->header.fsf_command = req->fsf_command;
933 zfcp_erp_adapter_reopen(adapter, 808 req->seq_no = adapter->fsf_req_seq_no;
934 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 809 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
935 | ZFCP_STATUS_COMMON_ERP_FAILED, 810 sbale[1].addr = (void *) req->qtcb;
936 102, fsf_req); 811 sbale[1].length = sizeof(struct fsf_qtcb);
937 break; 812 }
938 813
939 case FSF_STATUS_READ_NOTIFICATION_LOST: 814 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
940 ZFCP_LOG_NORMAL("Unsolicited status notification(s) lost: " 815 zfcp_fsf_req_free(req);
941 "adapter %s%s%s%s%s%s%s%s%s\n", 816 return ERR_PTR(-EIO);
942 zfcp_get_busid_by_adapter(adapter), 817 }
943 (status_buffer->status_subtype &
944 FSF_STATUS_READ_SUB_INCOMING_ELS) ?
945 ", incoming ELS" : "",
946 (status_buffer->status_subtype &
947 FSF_STATUS_READ_SUB_SENSE_DATA) ?
948 ", sense data" : "",
949 (status_buffer->status_subtype &
950 FSF_STATUS_READ_SUB_LINK_STATUS) ?
951 ", link status change" : "",
952 (status_buffer->status_subtype &
953 FSF_STATUS_READ_SUB_PORT_CLOSED) ?
954 ", port close" : "",
955 (status_buffer->status_subtype &
956 FSF_STATUS_READ_SUB_BIT_ERROR_THRESHOLD) ?
957 ", bit error exception" : "",
958 (status_buffer->status_subtype &
959 FSF_STATUS_READ_SUB_ACT_UPDATED) ?
960 ", ACT update" : "",
961 (status_buffer->status_subtype &
962 FSF_STATUS_READ_SUB_ACT_HARDENED) ?
963 ", ACT hardening" : "",
964 (status_buffer->status_subtype &
965 FSF_STATUS_READ_SUB_FEATURE_UPDATE_ALERT) ?
966 ", adapter feature change" : "");
967
968 if (status_buffer->status_subtype &
969 FSF_STATUS_READ_SUB_ACT_UPDATED)
970 zfcp_erp_adapter_access_changed(adapter, 135, fsf_req);
971 break;
972 818
973 case FSF_STATUS_READ_CFDC_UPDATED: 819 if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP))
974 ZFCP_LOG_NORMAL("CFDC has been updated on the adapter %s\n", 820 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
975 zfcp_get_busid_by_adapter(adapter));
976 zfcp_erp_adapter_access_changed(adapter, 136, fsf_req);
977 break;
978 821
979 case FSF_STATUS_READ_CFDC_HARDENED: 822 return req;
980 switch (status_buffer->status_subtype) { 823}
981 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE:
982 ZFCP_LOG_NORMAL("CFDC of adapter %s saved on SE\n",
983 zfcp_get_busid_by_adapter(adapter));
984 break;
985 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2:
986 ZFCP_LOG_NORMAL("CFDC of adapter %s has been copied "
987 "to the secondary SE\n",
988 zfcp_get_busid_by_adapter(adapter));
989 break;
990 default:
991 ZFCP_LOG_NORMAL("CFDC of adapter %s has been hardened\n",
992 zfcp_get_busid_by_adapter(adapter));
993 }
994 break;
995 824
996 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: 825static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
997 ZFCP_LOG_INFO("List of supported features on adapter %s has " 826{
998 "been changed from 0x%08X to 0x%08X\n", 827 struct zfcp_adapter *adapter = req->adapter;
999 zfcp_get_busid_by_adapter(adapter), 828 struct zfcp_qdio_queue *req_q = &adapter->req_q;
1000 *(u32*) (status_buffer->payload + 4), 829 int idx;
1001 *(u32*) (status_buffer->payload));
1002 adapter->adapter_features = *(u32*) status_buffer->payload;
1003 break;
1004 830
1005 default: 831 /* put allocated FSF request into hash table */
1006 ZFCP_LOG_NORMAL("warning: An unsolicited status packet of unknown " 832 spin_lock(&adapter->req_list_lock);
1007 "type was received (debug info 0x%x)\n", 833 idx = zfcp_reqlist_hash(req->req_id);
1008 status_buffer->status_type); 834 list_add_tail(&req->list, &adapter->req_list[idx]);
1009 ZFCP_LOG_DEBUG("Dump of status_read_buffer %p:\n", 835 spin_unlock(&adapter->req_list_lock);
1010 status_buffer); 836
1011 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, 837 req->issued = get_clock();
1012 (char *) status_buffer, 838 if (zfcp_qdio_send(req)) {
1013 sizeof (struct fsf_status_read_buffer)); 839 /* Queues are down..... */
1014 break; 840 del_timer(&req->timer);
1015 } 841 spin_lock(&adapter->req_list_lock);
1016 mempool_free(status_buffer, adapter->pool.data_status_read); 842 zfcp_reqlist_remove(adapter, req);
1017 zfcp_fsf_req_free(fsf_req); 843 spin_unlock(&adapter->req_list_lock);
1018 /* 844 /* undo changes in request queue made for this request */
1019 * recycle buffer and start new request repeat until outbound 845 atomic_add(req->sbal_number, &req_q->count);
1020 * queue is empty or adapter shutdown is requested 846 req_q->first -= req->sbal_number;
1021 */ 847 req_q->first += QDIO_MAX_BUFFERS_PER_Q;
1022 /* 848 req_q->first %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
1023 * FIXME(qdio): 849 zfcp_erp_adapter_reopen(adapter, 0, 116, req);
1024 * we may wait in the req_create for 5s during shutdown, so 850 return -EIO;
1025 * qdio_cleanup will have to wait at least that long before returning
1026 * with failure to allow us a proper cleanup under all circumstances
1027 */
1028 /*
1029 * FIXME:
1030 * allocation failure possible? (Is this code needed?)
1031 */
1032 retval = zfcp_fsf_status_read(adapter, 0);
1033 if (retval < 0) {
1034 ZFCP_LOG_INFO("Failed to create unsolicited status read "
1035 "request for the adapter %s.\n",
1036 zfcp_get_busid_by_adapter(adapter));
1037 /* temporary fix to avoid status read buffer shortage */
1038 adapter->status_read_failed++;
1039 if ((ZFCP_STATUS_READS_RECOM - adapter->status_read_failed)
1040 < ZFCP_STATUS_READ_FAILED_THRESHOLD) {
1041 ZFCP_LOG_INFO("restart adapter %s due to status read "
1042 "buffer shortage\n",
1043 zfcp_get_busid_by_adapter(adapter));
1044 zfcp_erp_adapter_reopen(adapter, 0, 103, fsf_req);
1045 }
1046 } 851 }
1047 out: 852
1048 return retval; 853 /* Don't increase for unsolicited status */
854 if (req->qtcb)
855 adapter->fsf_req_seq_no++;
856
857 return 0;
1049} 858}
1050 859
1051/* 860/**
1052 * function: zfcp_fsf_abort_fcp_command 861 * zfcp_fsf_status_read - send status read request
1053 * 862 * @adapter: pointer to struct zfcp_adapter
1054 * purpose: tells FSF to abort a running SCSI command 863 * @req_flags: request flags
1055 * 864 * Returns: 0 on success, ERROR otherwise
1056 * returns: address of initiated FSF request
1057 * NULL - request could not be initiated
1058 *
1059 * FIXME(design): should be watched by a timeout !!!
1060 * FIXME(design) shouldn't this be modified to return an int
1061 * also...don't know how though
1062 */ 865 */
1063struct zfcp_fsf_req * 866int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
1064zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
1065 struct zfcp_adapter *adapter,
1066 struct zfcp_unit *unit, int req_flags)
1067{ 867{
868 struct zfcp_fsf_req *req;
869 struct fsf_status_read_buffer *sr_buf;
1068 volatile struct qdio_buffer_element *sbale; 870 volatile struct qdio_buffer_element *sbale;
1069 struct zfcp_fsf_req *fsf_req = NULL; 871 int retval = -EIO;
1070 unsigned long lock_flags;
1071 int retval = 0;
1072
1073 /* setup new FSF request */
1074 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
1075 req_flags, adapter->pool.fsf_req_abort,
1076 &lock_flags, &fsf_req);
1077 if (retval < 0) {
1078 ZFCP_LOG_INFO("error: Failed to create an abort command "
1079 "request for lun 0x%016Lx on port 0x%016Lx "
1080 "on adapter %s.\n",
1081 unit->fcp_lun,
1082 unit->port->wwpn,
1083 zfcp_get_busid_by_adapter(adapter));
1084 goto out;
1085 }
1086
1087 if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
1088 &unit->status)))
1089 goto unit_blocked;
1090 872
1091 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 873 spin_lock(&adapter->req_q.lock);
1092 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 874 if (zfcp_fsf_req_sbal_get(adapter))
1093 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 875 goto out;
1094 876
1095 fsf_req->data = (unsigned long) unit; 877 req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
878 ZFCP_REQ_NO_QTCB,
879 adapter->pool.fsf_req_status_read);
880 if (unlikely(IS_ERR(req))) {
881 retval = PTR_ERR(req);
882 goto out;
883 }
1096 884
1097 /* set handles of unit and its parent port in QTCB */ 885 sbale = zfcp_qdio_sbale_req(req);
1098 fsf_req->qtcb->header.lun_handle = unit->handle; 886 sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
1099 fsf_req->qtcb->header.port_handle = unit->port->handle; 887 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
888 req->sbale_curr = 2;
1100 889
1101 /* set handle of request which should be aborted */ 890 sr_buf = mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC);
1102 fsf_req->qtcb->bottom.support.req_handle = (u64) old_req_id; 891 if (!sr_buf) {
892 retval = -ENOMEM;
893 goto failed_buf;
894 }
895 memset(sr_buf, 0, sizeof(*sr_buf));
896 req->data = sr_buf;
897 sbale = zfcp_qdio_sbale_curr(req);
898 sbale->addr = (void *) sr_buf;
899 sbale->length = sizeof(*sr_buf);
1103 900
1104 zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT); 901 retval = zfcp_fsf_req_send(req);
1105 retval = zfcp_fsf_req_send(fsf_req); 902 if (retval)
1106 if (!retval) 903 goto failed_req_send;
1107 goto out;
1108 904
1109 unit_blocked: 905 goto out;
1110 zfcp_fsf_req_free(fsf_req);
1111 fsf_req = NULL;
1112 906
1113 out: 907failed_req_send:
1114 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); 908 mempool_free(sr_buf, adapter->pool.data_status_read);
1115 return fsf_req; 909failed_buf:
910 zfcp_fsf_req_free(req);
911 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
912out:
913 spin_unlock(&adapter->req_q.lock);
914 return retval;
1116} 915}
1117 916
1118/* 917static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
1119 * function: zfcp_fsf_abort_fcp_command_handler
1120 *
1121 * purpose: is called for finished Abort FCP Command request
1122 *
1123 * returns:
1124 */
1125static int
1126zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
1127{ 918{
1128 int retval = -EINVAL; 919 struct zfcp_unit *unit = req->data;
1129 struct zfcp_unit *unit; 920 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
1130 union fsf_status_qual *fsf_stat_qual =
1131 &new_fsf_req->qtcb->header.fsf_status_qual;
1132
1133 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1134 /* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */
1135 goto skip_fsfstatus;
1136 }
1137
1138 unit = (struct zfcp_unit *) new_fsf_req->data;
1139 921
1140 /* evaluate FSF status in QTCB */ 922 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1141 switch (new_fsf_req->qtcb->header.fsf_status) { 923 return;
1142 924
925 switch (req->qtcb->header.fsf_status) {
1143 case FSF_PORT_HANDLE_NOT_VALID: 926 case FSF_PORT_HANDLE_NOT_VALID:
1144 if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) { 927 if (fsq->word[0] == fsq->word[1]) {
1145 /*
1146 * In this case a command that was sent prior to a port
1147 * reopen was aborted (handles are different). This is
1148 * fine.
1149 */
1150 } else {
1151 ZFCP_LOG_INFO("Temporary port identifier 0x%x for "
1152 "port 0x%016Lx on adapter %s invalid. "
1153 "This may happen occasionally.\n",
1154 unit->port->handle,
1155 unit->port->wwpn,
1156 zfcp_get_busid_by_unit(unit));
1157 ZFCP_LOG_INFO("status qualifier:\n");
1158 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
1159 (char *) &new_fsf_req->qtcb->header.
1160 fsf_status_qual,
1161 sizeof (union fsf_status_qual));
1162 /* Let's hope this sorts out the mess */
1163 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 104, 928 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 104,
1164 new_fsf_req); 929 req);
1165 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 930 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1166 } 931 }
1167 break; 932 break;
1168
1169 case FSF_LUN_HANDLE_NOT_VALID: 933 case FSF_LUN_HANDLE_NOT_VALID:
1170 if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) { 934 if (fsq->word[0] == fsq->word[1]) {
1171 /* 935 zfcp_erp_port_reopen(unit->port, 0, 105, req);
1172 * In this case a command that was sent prior to a unit 936 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1173 * reopen was aborted (handles are different).
1174 * This is fine.
1175 */
1176 } else {
1177 ZFCP_LOG_INFO
1178 ("Warning: Temporary LUN identifier 0x%x of LUN "
1179 "0x%016Lx on port 0x%016Lx on adapter %s is "
1180 "invalid. This may happen in rare cases. "
1181 "Trying to re-establish link.\n",
1182 unit->handle,
1183 unit->fcp_lun,
1184 unit->port->wwpn,
1185 zfcp_get_busid_by_unit(unit));
1186 ZFCP_LOG_DEBUG("Status qualifier data:\n");
1187 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
1188 (char *) &new_fsf_req->qtcb->header.
1189 fsf_status_qual,
1190 sizeof (union fsf_status_qual));
1191 /* Let's hope this sorts out the mess */
1192 zfcp_erp_port_reopen(unit->port, 0, 105, new_fsf_req);
1193 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1194 } 937 }
1195 break; 938 break;
1196
1197 case FSF_FCP_COMMAND_DOES_NOT_EXIST: 939 case FSF_FCP_COMMAND_DOES_NOT_EXIST:
1198 retval = 0; 940 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
1199 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
1200 break; 941 break;
1201
1202 case FSF_PORT_BOXED: 942 case FSF_PORT_BOXED:
1203 ZFCP_LOG_INFO("Remote port 0x%016Lx on adapter %s needs to " 943 zfcp_erp_port_boxed(unit->port, 47, req);
1204 "be reopened\n", unit->port->wwpn, 944 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1205 zfcp_get_busid_by_unit(unit)); 945 ZFCP_STATUS_FSFREQ_RETRY;
1206 zfcp_erp_port_boxed(unit->port, 47, new_fsf_req);
1207 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
1208 | ZFCP_STATUS_FSFREQ_RETRY;
1209 break; 946 break;
1210
1211 case FSF_LUN_BOXED: 947 case FSF_LUN_BOXED:
1212 ZFCP_LOG_INFO( 948 zfcp_erp_unit_boxed(unit, 48, req);
1213 "unit 0x%016Lx on port 0x%016Lx on adapter %s needs " 949 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1214 "to be reopened\n", 950 ZFCP_STATUS_FSFREQ_RETRY;
1215 unit->fcp_lun, unit->port->wwpn,
1216 zfcp_get_busid_by_unit(unit));
1217 zfcp_erp_unit_boxed(unit, 48, new_fsf_req);
1218 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
1219 | ZFCP_STATUS_FSFREQ_RETRY;
1220 break; 951 break;
1221
1222 case FSF_ADAPTER_STATUS_AVAILABLE: 952 case FSF_ADAPTER_STATUS_AVAILABLE:
1223 switch (new_fsf_req->qtcb->header.fsf_status_qual.word[0]) { 953 switch (fsq->word[0]) {
1224 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 954 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1225 zfcp_test_link(unit->port); 955 zfcp_test_link(unit->port);
1226 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1227 break;
1228 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 956 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1229 /* SCSI stack will escalate */ 957 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1230 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1231 break;
1232 default:
1233 ZFCP_LOG_NORMAL
1234 ("bug: Wrong status qualifier 0x%x arrived.\n",
1235 new_fsf_req->qtcb->header.fsf_status_qual.word[0]);
1236 break; 958 break;
1237 } 959 }
1238 break; 960 break;
1239
1240 case FSF_GOOD: 961 case FSF_GOOD:
1241 retval = 0; 962 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
1242 new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
1243 break;
1244
1245 default:
1246 ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
1247 "(debug info 0x%x)\n",
1248 new_fsf_req->qtcb->header.fsf_status);
1249 break; 963 break;
1250 } 964 }
1251 skip_fsfstatus:
1252 return retval;
1253} 965}
1254 966
1255/** 967/**
1256 * zfcp_use_one_sbal - checks whether req buffer and resp bother each fit into 968 * zfcp_fsf_abort_fcp_command - abort running SCSI command
1257 * one SBALE 969 * @old_req_id: unsigned long
1258 * Two scatter-gather lists are passed, one for the reqeust and one for the 970 * @adapter: pointer to struct zfcp_adapter
1259 * response. 971 * @unit: pointer to struct zfcp_unit
972 * @req_flags: integer specifying the request flags
973 * Returns: pointer to struct zfcp_fsf_req
974 *
975 * FIXME(design): should be watched by a timeout !!!
1260 */ 976 */
1261static inline int
1262zfcp_use_one_sbal(struct scatterlist *req, int req_count,
1263 struct scatterlist *resp, int resp_count)
1264{
1265 return ((req_count == 1) &&
1266 (resp_count == 1) &&
1267 (((unsigned long) zfcp_sg_to_address(&req[0]) &
1268 PAGE_MASK) ==
1269 ((unsigned long) (zfcp_sg_to_address(&req[0]) +
1270 req[0].length - 1) & PAGE_MASK)) &&
1271 (((unsigned long) zfcp_sg_to_address(&resp[0]) &
1272 PAGE_MASK) ==
1273 ((unsigned long) (zfcp_sg_to_address(&resp[0]) +
1274 resp[0].length - 1) & PAGE_MASK)));
1275}
1276 977
1277/** 978struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
1278 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS) 979 struct zfcp_adapter *adapter,
1279 * @ct: pointer to struct zfcp_send_ct which conatins all needed data for 980 struct zfcp_unit *unit,
1280 * the request 981 int req_flags)
1281 * @pool: pointer to memory pool, if non-null this pool is used to allocate
1282 * a struct zfcp_fsf_req
1283 * @erp_action: pointer to erp_action, if non-null the Generic Service request
1284 * is sent within error recovery
1285 */
1286int
1287zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1288 struct zfcp_erp_action *erp_action)
1289{ 982{
1290 volatile struct qdio_buffer_element *sbale; 983 volatile struct qdio_buffer_element *sbale;
1291 struct zfcp_port *port; 984 struct zfcp_fsf_req *req = NULL;
1292 struct zfcp_adapter *adapter;
1293 struct zfcp_fsf_req *fsf_req;
1294 unsigned long lock_flags;
1295 int bytes;
1296 int ret = 0;
1297
1298 port = ct->port;
1299 adapter = port->adapter;
1300
1301 ret = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
1302 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
1303 pool, &lock_flags, &fsf_req);
1304 if (ret < 0) {
1305 ZFCP_LOG_INFO("error: Could not create CT request (FC-GS) for "
1306 "adapter: %s\n",
1307 zfcp_get_busid_by_adapter(adapter));
1308 goto failed_req;
1309 }
1310 985
1311 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 986 spin_lock(&adapter->req_q.lock);
1312 if (zfcp_use_one_sbal(ct->req, ct->req_count, 987 if (!atomic_read(&adapter->req_q.count))
1313 ct->resp, ct->resp_count)){ 988 goto out;
1314 /* both request buffer and response buffer 989 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
1315 fit into one sbale each */ 990 req_flags, adapter->pool.fsf_req_abort);
1316 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ; 991 if (unlikely(IS_ERR(req)))
1317 sbale[2].addr = zfcp_sg_to_address(&ct->req[0]); 992 goto out;
1318 sbale[2].length = ct->req[0].length;
1319 sbale[3].addr = zfcp_sg_to_address(&ct->resp[0]);
1320 sbale[3].length = ct->resp[0].length;
1321 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1322 } else if (adapter->adapter_features &
1323 FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
1324 /* try to use chained SBALs */
1325 bytes = zfcp_qdio_sbals_from_sg(fsf_req,
1326 SBAL_FLAGS0_TYPE_WRITE_READ,
1327 ct->req, ct->req_count,
1328 ZFCP_MAX_SBALS_PER_CT_REQ);
1329 if (bytes <= 0) {
1330 ZFCP_LOG_INFO("error: creation of CT request failed "
1331 "on adapter %s\n",
1332 zfcp_get_busid_by_adapter(adapter));
1333 if (bytes == 0)
1334 ret = -ENOMEM;
1335 else
1336 ret = bytes;
1337
1338 goto failed_send;
1339 }
1340 fsf_req->qtcb->bottom.support.req_buf_length = bytes;
1341 fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1342 bytes = zfcp_qdio_sbals_from_sg(fsf_req,
1343 SBAL_FLAGS0_TYPE_WRITE_READ,
1344 ct->resp, ct->resp_count,
1345 ZFCP_MAX_SBALS_PER_CT_REQ);
1346 if (bytes <= 0) {
1347 ZFCP_LOG_INFO("error: creation of CT request failed "
1348 "on adapter %s\n",
1349 zfcp_get_busid_by_adapter(adapter));
1350 if (bytes == 0)
1351 ret = -ENOMEM;
1352 else
1353 ret = bytes;
1354
1355 goto failed_send;
1356 }
1357 fsf_req->qtcb->bottom.support.resp_buf_length = bytes;
1358 } else {
1359 /* reject send generic request */
1360 ZFCP_LOG_INFO(
1361 "error: microcode does not support chained SBALs,"
1362 "CT request too big (adapter %s)\n",
1363 zfcp_get_busid_by_adapter(adapter));
1364 ret = -EOPNOTSUPP;
1365 goto failed_send;
1366 }
1367
1368 /* settings in QTCB */
1369 fsf_req->qtcb->header.port_handle = port->handle;
1370 fsf_req->qtcb->bottom.support.service_class =
1371 ZFCP_FC_SERVICE_CLASS_DEFAULT;
1372 fsf_req->qtcb->bottom.support.timeout = ct->timeout;
1373 fsf_req->data = (unsigned long) ct;
1374
1375 zfcp_san_dbf_event_ct_request(fsf_req);
1376 993
1377 if (erp_action) { 994 if (unlikely(!(atomic_read(&unit->status) &
1378 erp_action->fsf_req = fsf_req; 995 ZFCP_STATUS_COMMON_UNBLOCKED)))
1379 fsf_req->erp_action = erp_action; 996 goto out_error_free;
1380 zfcp_erp_start_timer(fsf_req);
1381 } else
1382 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
1383 997
1384 ret = zfcp_fsf_req_send(fsf_req); 998 sbale = zfcp_qdio_sbale_req(req);
1385 if (ret) { 999 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1386 ZFCP_LOG_DEBUG("error: initiation of CT request failed " 1000 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1387 "(adapter %s, port 0x%016Lx)\n",
1388 zfcp_get_busid_by_adapter(adapter), port->wwpn);
1389 goto failed_send;
1390 }
1391 1001
1392 ZFCP_LOG_DEBUG("CT request initiated (adapter %s, port 0x%016Lx)\n", 1002 req->data = unit;
1393 zfcp_get_busid_by_adapter(adapter), port->wwpn); 1003 req->handler = zfcp_fsf_abort_fcp_command_handler;
1394 goto out; 1004 req->qtcb->header.lun_handle = unit->handle;
1005 req->qtcb->header.port_handle = unit->port->handle;
1006 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
1395 1007
1396 failed_send: 1008 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
1397 zfcp_fsf_req_free(fsf_req); 1009 if (!zfcp_fsf_req_send(req))
1398 if (erp_action != NULL) { 1010 goto out;
1399 erp_action->fsf_req = NULL; 1011
1400 } 1012out_error_free:
1401 failed_req: 1013 zfcp_fsf_req_free(req);
1402 out: 1014 req = NULL;
1403 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 1015out:
1404 lock_flags); 1016 spin_unlock(&adapter->req_q.lock);
1405 return ret; 1017 return req;
1406} 1018}
1407 1019
1408/** 1020static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1409 * zfcp_fsf_send_ct_handler - handler for Generic Service requests
1410 * @fsf_req: pointer to struct zfcp_fsf_req
1411 *
1412 * Data specific for the Generic Service request is passed using
1413 * fsf_req->data. There we find the pointer to struct zfcp_send_ct.
1414 * Usually a specific handler for the CT request is called which is
1415 * found in this structure.
1416 */
1417static int
1418zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
1419{ 1021{
1420 struct zfcp_port *port; 1022 struct zfcp_adapter *adapter = req->adapter;
1421 struct zfcp_adapter *adapter; 1023 struct zfcp_send_ct *send_ct = req->data;
1422 struct zfcp_send_ct *send_ct; 1024 struct zfcp_port *port = send_ct->port;
1423 struct fsf_qtcb_header *header; 1025 struct fsf_qtcb_header *header = &req->qtcb->header;
1424 struct fsf_qtcb_bottom_support *bottom;
1425 int retval = -EINVAL;
1426 u16 subtable, rule, counter;
1427 1026
1428 adapter = fsf_req->adapter; 1027 send_ct->status = -EINVAL;
1429 send_ct = (struct zfcp_send_ct *) fsf_req->data;
1430 port = send_ct->port;
1431 header = &fsf_req->qtcb->header;
1432 bottom = &fsf_req->qtcb->bottom.support;
1433 1028
1434 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) 1029 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1435 goto skip_fsfstatus; 1030 goto skip_fsfstatus;
1436 1031
1437 /* evaluate FSF status in QTCB */
1438 switch (header->fsf_status) { 1032 switch (header->fsf_status) {
1439
1440 case FSF_GOOD: 1033 case FSF_GOOD:
1441 zfcp_san_dbf_event_ct_response(fsf_req); 1034 zfcp_san_dbf_event_ct_response(req);
1442 retval = 0; 1035 send_ct->status = 0;
1443 break; 1036 break;
1444
1445 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 1037 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1446 ZFCP_LOG_INFO("error: adapter %s does not support fc " 1038 zfcp_fsf_class_not_supp(req);
1447 "class %d.\n",
1448 zfcp_get_busid_by_port(port),
1449 ZFCP_FC_SERVICE_CLASS_DEFAULT);
1450 /* stop operation for this adapter */
1451 zfcp_erp_adapter_shutdown(adapter, 0, 123, fsf_req);
1452 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1453 break; 1039 break;
1454
1455 case FSF_ADAPTER_STATUS_AVAILABLE: 1040 case FSF_ADAPTER_STATUS_AVAILABLE:
1456 switch (header->fsf_status_qual.word[0]){ 1041 switch (header->fsf_status_qual.word[0]){
1457 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1042 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1458 /* reopening link to port */
1459 zfcp_test_link(port); 1043 zfcp_test_link(port);
1460 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1461 break;
1462 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1044 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1463 /* ERP strategy will escalate */ 1045 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1464 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1465 break;
1466 default:
1467 ZFCP_LOG_INFO("bug: Wrong status qualifier 0x%x "
1468 "arrived.\n",
1469 header->fsf_status_qual.word[0]);
1470 break; 1046 break;
1471 } 1047 }
1472 break; 1048 break;
1473
1474 case FSF_ACCESS_DENIED: 1049 case FSF_ACCESS_DENIED:
1475 ZFCP_LOG_NORMAL("access denied, cannot send generic service " 1050 zfcp_fsf_access_denied_port(req, port);
1476 "command (adapter %s, port d_id=0x%06x)\n",
1477 zfcp_get_busid_by_port(port), port->d_id);
1478 for (counter = 0; counter < 2; counter++) {
1479 subtable = header->fsf_status_qual.halfword[counter * 2];
1480 rule = header->fsf_status_qual.halfword[counter * 2 + 1];
1481 switch (subtable) {
1482 case FSF_SQ_CFDC_SUBTABLE_OS:
1483 case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
1484 case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
1485 case FSF_SQ_CFDC_SUBTABLE_LUN:
1486 ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
1487 zfcp_act_subtable_type[subtable], rule);
1488 break;
1489 }
1490 }
1491 zfcp_erp_port_access_denied(port, 55, fsf_req);
1492 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1493 break;
1494
1495 case FSF_GENERIC_COMMAND_REJECTED:
1496 ZFCP_LOG_INFO("generic service command rejected "
1497 "(adapter %s, port d_id=0x%06x)\n",
1498 zfcp_get_busid_by_port(port), port->d_id);
1499 ZFCP_LOG_INFO("status qualifier:\n");
1500 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
1501 (char *) &header->fsf_status_qual,
1502 sizeof (union fsf_status_qual));
1503 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1504 break;
1505
1506 case FSF_PORT_HANDLE_NOT_VALID:
1507 ZFCP_LOG_DEBUG("Temporary port identifier 0x%x for port "
1508 "0x%016Lx on adapter %s invalid. This may "
1509 "happen occasionally.\n", port->handle,
1510 port->wwpn, zfcp_get_busid_by_port(port));
1511 ZFCP_LOG_INFO("status qualifier:\n");
1512 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
1513 (char *) &header->fsf_status_qual,
1514 sizeof (union fsf_status_qual));
1515 zfcp_erp_adapter_reopen(adapter, 0, 106, fsf_req);
1516 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1517 break; 1051 break;
1518
1519 case FSF_PORT_BOXED: 1052 case FSF_PORT_BOXED:
1520 ZFCP_LOG_INFO("port needs to be reopened " 1053 zfcp_erp_port_boxed(port, 49, req);
1521 "(adapter %s, port d_id=0x%06x)\n", 1054 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1522 zfcp_get_busid_by_port(port), port->d_id); 1055 ZFCP_STATUS_FSFREQ_RETRY;
1523 zfcp_erp_port_boxed(port, 49, fsf_req);
1524 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
1525 | ZFCP_STATUS_FSFREQ_RETRY;
1526 break; 1056 break;
1527 1057 case FSF_PORT_HANDLE_NOT_VALID:
1528 /* following states should never occure, all cases avoided 1058 zfcp_erp_adapter_reopen(adapter, 0, 106, req);
1529 in zfcp_fsf_send_ct - but who knows ... */ 1059 case FSF_GENERIC_COMMAND_REJECTED:
1530 case FSF_PAYLOAD_SIZE_MISMATCH: 1060 case FSF_PAYLOAD_SIZE_MISMATCH:
1531 ZFCP_LOG_INFO("payload size mismatch (adapter: %s, "
1532 "req_buf_length=%d, resp_buf_length=%d)\n",
1533 zfcp_get_busid_by_adapter(adapter),
1534 bottom->req_buf_length, bottom->resp_buf_length);
1535 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1536 break;
1537 case FSF_REQUEST_SIZE_TOO_LARGE: 1061 case FSF_REQUEST_SIZE_TOO_LARGE:
1538 ZFCP_LOG_INFO("request size too large (adapter: %s, "
1539 "req_buf_length=%d)\n",
1540 zfcp_get_busid_by_adapter(adapter),
1541 bottom->req_buf_length);
1542 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1543 break;
1544 case FSF_RESPONSE_SIZE_TOO_LARGE: 1062 case FSF_RESPONSE_SIZE_TOO_LARGE:
1545 ZFCP_LOG_INFO("response size too large (adapter: %s, "
1546 "resp_buf_length=%d)\n",
1547 zfcp_get_busid_by_adapter(adapter),
1548 bottom->resp_buf_length);
1549 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1550 break;
1551 case FSF_SBAL_MISMATCH: 1063 case FSF_SBAL_MISMATCH:
1552 ZFCP_LOG_INFO("SBAL mismatch (adapter: %s, req_buf_length=%d, " 1064 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1553 "resp_buf_length=%d)\n",
1554 zfcp_get_busid_by_adapter(adapter),
1555 bottom->req_buf_length, bottom->resp_buf_length);
1556 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1557 break;
1558
1559 default:
1560 ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
1561 "(debug info 0x%x)\n", header->fsf_status);
1562 break; 1065 break;
1563 } 1066 }
1564 1067
1565skip_fsfstatus: 1068skip_fsfstatus:
1566 send_ct->status = retval; 1069 if (send_ct->handler)
1567
1568 if (send_ct->handler != NULL)
1569 send_ct->handler(send_ct->handler_data); 1070 send_ct->handler(send_ct->handler_data);
1071}
1570 1072
1571 return retval; 1073static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req,
1074 struct scatterlist *sg_req,
1075 struct scatterlist *sg_resp, int max_sbals)
1076{
1077 int bytes;
1078
1079 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
1080 sg_req, max_sbals);
1081 if (bytes <= 0)
1082 return -ENOMEM;
1083 req->qtcb->bottom.support.req_buf_length = bytes;
1084 req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1085
1086 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
1087 sg_resp, max_sbals);
1088 if (bytes <= 0)
1089 return -ENOMEM;
1090 req->qtcb->bottom.support.resp_buf_length = bytes;
1091
1092 return 0;
1572} 1093}
1573 1094
1574/** 1095/**
1575 * zfcp_fsf_send_els - initiate an ELS command (FC-FS) 1096 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1576 * @els: pointer to struct zfcp_send_els which contains all needed data for 1097 * @ct: pointer to struct zfcp_send_ct with data for request
1577 * the command. 1098 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1099 * @erp_action: if non-null the Generic Service request sent within ERP
1578 */ 1100 */
1579int 1101int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1580zfcp_fsf_send_els(struct zfcp_send_els *els) 1102 struct zfcp_erp_action *erp_action)
1581{ 1103{
1582 volatile struct qdio_buffer_element *sbale; 1104 struct zfcp_port *port = ct->port;
1583 struct zfcp_fsf_req *fsf_req; 1105 struct zfcp_adapter *adapter = port->adapter;
1584 u32 d_id; 1106 struct zfcp_fsf_req *req;
1585 struct zfcp_adapter *adapter; 1107 int ret = -EIO;
1586 unsigned long lock_flags;
1587 int bytes;
1588 int ret = 0;
1589
1590 d_id = els->d_id;
1591 adapter = els->adapter;
1592 1108
1593 ret = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, 1109 spin_lock(&adapter->req_q.lock);
1594 ZFCP_REQ_AUTO_CLEANUP, 1110 if (zfcp_fsf_req_sbal_get(adapter))
1595 NULL, &lock_flags, &fsf_req); 1111 goto out;
1596 if (ret < 0) {
1597 ZFCP_LOG_INFO("error: creation of ELS request failed "
1598 "(adapter %s, port d_id: 0x%06x)\n",
1599 zfcp_get_busid_by_adapter(adapter), d_id);
1600 goto failed_req;
1601 }
1602 1112
1603 if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED, 1113 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
1604 &els->port->status))) { 1114 ZFCP_REQ_AUTO_CLEANUP, pool);
1605 ret = -EBUSY; 1115 if (unlikely(IS_ERR(req))) {
1606 goto port_blocked; 1116 ret = PTR_ERR(req);
1117 goto out;
1607 } 1118 }
1608 1119
1609 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1120 ret = zfcp_fsf_setup_sbals(req, ct->req, ct->resp,
1610 if (zfcp_use_one_sbal(els->req, els->req_count, 1121 FSF_MAX_SBALS_PER_REQ);
1611 els->resp, els->resp_count)){ 1122 if (ret)
1612 /* both request buffer and response buffer
1613 fit into one sbale each */
1614 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
1615 sbale[2].addr = zfcp_sg_to_address(&els->req[0]);
1616 sbale[2].length = els->req[0].length;
1617 sbale[3].addr = zfcp_sg_to_address(&els->resp[0]);
1618 sbale[3].length = els->resp[0].length;
1619 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1620 } else if (adapter->adapter_features &
1621 FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
1622 /* try to use chained SBALs */
1623 bytes = zfcp_qdio_sbals_from_sg(fsf_req,
1624 SBAL_FLAGS0_TYPE_WRITE_READ,
1625 els->req, els->req_count,
1626 ZFCP_MAX_SBALS_PER_ELS_REQ);
1627 if (bytes <= 0) {
1628 ZFCP_LOG_INFO("error: creation of ELS request failed "
1629 "(adapter %s, port d_id: 0x%06x)\n",
1630 zfcp_get_busid_by_adapter(adapter), d_id);
1631 if (bytes == 0) {
1632 ret = -ENOMEM;
1633 } else {
1634 ret = bytes;
1635 }
1636 goto failed_send;
1637 }
1638 fsf_req->qtcb->bottom.support.req_buf_length = bytes;
1639 fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1640 bytes = zfcp_qdio_sbals_from_sg(fsf_req,
1641 SBAL_FLAGS0_TYPE_WRITE_READ,
1642 els->resp, els->resp_count,
1643 ZFCP_MAX_SBALS_PER_ELS_REQ);
1644 if (bytes <= 0) {
1645 ZFCP_LOG_INFO("error: creation of ELS request failed "
1646 "(adapter %s, port d_id: 0x%06x)\n",
1647 zfcp_get_busid_by_adapter(adapter), d_id);
1648 if (bytes == 0) {
1649 ret = -ENOMEM;
1650 } else {
1651 ret = bytes;
1652 }
1653 goto failed_send;
1654 }
1655 fsf_req->qtcb->bottom.support.resp_buf_length = bytes;
1656 } else {
1657 /* reject request */
1658 ZFCP_LOG_INFO("error: microcode does not support chained SBALs"
1659 ", ELS request too big (adapter %s, "
1660 "port d_id: 0x%06x)\n",
1661 zfcp_get_busid_by_adapter(adapter), d_id);
1662 ret = -EOPNOTSUPP;
1663 goto failed_send;
1664 }
1665
1666 /* settings in QTCB */
1667 fsf_req->qtcb->bottom.support.d_id = d_id;
1668 fsf_req->qtcb->bottom.support.service_class =
1669 ZFCP_FC_SERVICE_CLASS_DEFAULT;
1670 fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT;
1671 fsf_req->data = (unsigned long) els;
1672
1673 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
1674
1675 zfcp_san_dbf_event_els_request(fsf_req);
1676
1677 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
1678 ret = zfcp_fsf_req_send(fsf_req);
1679 if (ret) {
1680 ZFCP_LOG_DEBUG("error: initiation of ELS request failed "
1681 "(adapter %s, port d_id: 0x%06x)\n",
1682 zfcp_get_busid_by_adapter(adapter), d_id);
1683 goto failed_send; 1123 goto failed_send;
1684 }
1685 1124
1686 ZFCP_LOG_DEBUG("ELS request initiated (adapter %s, port d_id: " 1125 req->handler = zfcp_fsf_send_ct_handler;
1687 "0x%06x)\n", zfcp_get_busid_by_adapter(adapter), d_id); 1126 req->qtcb->header.port_handle = port->handle;
1688 goto out; 1127 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1128 req->qtcb->bottom.support.timeout = ct->timeout;
1129 req->data = ct;
1689 1130
1690 port_blocked: 1131 zfcp_san_dbf_event_ct_request(req);
1691 failed_send:
1692 zfcp_fsf_req_free(fsf_req);
1693 1132
1694 failed_req: 1133 if (erp_action) {
1695 out: 1134 erp_action->fsf_req = req;
1696 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 1135 req->erp_action = erp_action;
1697 lock_flags); 1136 zfcp_fsf_start_erp_timer(req);
1137 } else
1138 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1139
1140 ret = zfcp_fsf_req_send(req);
1141 if (ret)
1142 goto failed_send;
1143
1144 goto out;
1698 1145
1699 return ret; 1146failed_send:
1147 zfcp_fsf_req_free(req);
1148 if (erp_action)
1149 erp_action->fsf_req = NULL;
1150out:
1151 spin_unlock(&adapter->req_q.lock);
1152 return ret;
1700} 1153}
1701 1154
1702/** 1155static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1703 * zfcp_fsf_send_els_handler - handler for ELS commands
1704 * @fsf_req: pointer to struct zfcp_fsf_req
1705 *
1706 * Data specific for the ELS command is passed using
1707 * fsf_req->data. There we find the pointer to struct zfcp_send_els.
1708 * Usually a specific handler for the ELS command is called which is
1709 * found in this structure.
1710 */
1711static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
1712{ 1156{
1713 struct zfcp_adapter *adapter; 1157 struct zfcp_send_els *send_els = req->data;
1714 struct zfcp_port *port; 1158 struct zfcp_port *port = send_els->port;
1715 u32 d_id; 1159 struct fsf_qtcb_header *header = &req->qtcb->header;
1716 struct fsf_qtcb_header *header; 1160
1717 struct fsf_qtcb_bottom_support *bottom; 1161 send_els->status = -EINVAL;
1718 struct zfcp_send_els *send_els; 1162
1719 int retval = -EINVAL; 1163 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1720 u16 subtable, rule, counter;
1721
1722 send_els = (struct zfcp_send_els *) fsf_req->data;
1723 adapter = send_els->adapter;
1724 port = send_els->port;
1725 d_id = send_els->d_id;
1726 header = &fsf_req->qtcb->header;
1727 bottom = &fsf_req->qtcb->bottom.support;
1728
1729 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
1730 goto skip_fsfstatus; 1164 goto skip_fsfstatus;
1731 1165
1732 switch (header->fsf_status) { 1166 switch (header->fsf_status) {
1733
1734 case FSF_GOOD: 1167 case FSF_GOOD:
1735 zfcp_san_dbf_event_els_response(fsf_req); 1168 zfcp_san_dbf_event_els_response(req);
1736 retval = 0; 1169 send_els->status = 0;
1737 break; 1170 break;
1738
1739 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 1171 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1740 ZFCP_LOG_INFO("error: adapter %s does not support fc " 1172 zfcp_fsf_class_not_supp(req);
1741 "class %d.\n",
1742 zfcp_get_busid_by_adapter(adapter),
1743 ZFCP_FC_SERVICE_CLASS_DEFAULT);
1744 /* stop operation for this adapter */
1745 zfcp_erp_adapter_shutdown(adapter, 0, 124, fsf_req);
1746 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1747 break; 1173 break;
1748
1749 case FSF_ADAPTER_STATUS_AVAILABLE: 1174 case FSF_ADAPTER_STATUS_AVAILABLE:
1750 switch (header->fsf_status_qual.word[0]){ 1175 switch (header->fsf_status_qual.word[0]){
1751 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1176 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1752 if (port && (send_els->ls_code != ZFCP_LS_ADISC)) 1177 if (port && (send_els->ls_code != ZFCP_LS_ADISC))
1753 zfcp_test_link(port); 1178 zfcp_test_link(port);
1754 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1179 /*fall through */
1755 break;
1756 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1180 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1757 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1758 retval =
1759 zfcp_handle_els_rjt(header->fsf_status_qual.word[1],
1760 (struct zfcp_ls_rjt_par *)
1761 &header->fsf_status_qual.word[2]);
1762 break;
1763 case FSF_SQ_RETRY_IF_POSSIBLE: 1181 case FSF_SQ_RETRY_IF_POSSIBLE:
1764 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1182 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1765 break; 1183 break;
1766 default:
1767 ZFCP_LOG_INFO("bug: Wrong status qualifier 0x%x\n",
1768 header->fsf_status_qual.word[0]);
1769 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
1770 (char*)header->fsf_status_qual.word, 16);
1771 } 1184 }
1772 break; 1185 break;
1773
1774 case FSF_ELS_COMMAND_REJECTED: 1186 case FSF_ELS_COMMAND_REJECTED:
1775 ZFCP_LOG_INFO("ELS has been rejected because command filter "
1776 "prohibited sending "
1777 "(adapter: %s, port d_id: 0x%06x)\n",
1778 zfcp_get_busid_by_adapter(adapter), d_id);
1779
1780 break;
1781
1782 case FSF_PAYLOAD_SIZE_MISMATCH: 1187 case FSF_PAYLOAD_SIZE_MISMATCH:
1783 ZFCP_LOG_INFO(
1784 "ELS request size and ELS response size must be either "
1785 "both 0, or both greater than 0 "
1786 "(adapter: %s, req_buf_length=%d resp_buf_length=%d)\n",
1787 zfcp_get_busid_by_adapter(adapter),
1788 bottom->req_buf_length,
1789 bottom->resp_buf_length);
1790 break;
1791
1792 case FSF_REQUEST_SIZE_TOO_LARGE: 1188 case FSF_REQUEST_SIZE_TOO_LARGE:
1793 ZFCP_LOG_INFO(
1794 "Length of the ELS request buffer, "
1795 "specified in QTCB bottom, "
1796 "exceeds the size of the buffers "
1797 "that have been allocated for ELS request data "
1798 "(adapter: %s, req_buf_length=%d)\n",
1799 zfcp_get_busid_by_adapter(adapter),
1800 bottom->req_buf_length);
1801 break;
1802
1803 case FSF_RESPONSE_SIZE_TOO_LARGE: 1189 case FSF_RESPONSE_SIZE_TOO_LARGE:
1804 ZFCP_LOG_INFO(
1805 "Length of the ELS response buffer, "
1806 "specified in QTCB bottom, "
1807 "exceeds the size of the buffers "
1808 "that have been allocated for ELS response data "
1809 "(adapter: %s, resp_buf_length=%d)\n",
1810 zfcp_get_busid_by_adapter(adapter),
1811 bottom->resp_buf_length);
1812 break;
1813
1814 case FSF_SBAL_MISMATCH:
1815 /* should never occure, avoided in zfcp_fsf_send_els */
1816 ZFCP_LOG_INFO("SBAL mismatch (adapter: %s, req_buf_length=%d, "
1817 "resp_buf_length=%d)\n",
1818 zfcp_get_busid_by_adapter(adapter),
1819 bottom->req_buf_length, bottom->resp_buf_length);
1820 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1821 break; 1190 break;
1822
1823 case FSF_ACCESS_DENIED: 1191 case FSF_ACCESS_DENIED:
1824 ZFCP_LOG_NORMAL("access denied, cannot send ELS command " 1192 zfcp_fsf_access_denied_port(req, port);
1825 "(adapter %s, port d_id=0x%06x)\n",
1826 zfcp_get_busid_by_adapter(adapter), d_id);
1827 for (counter = 0; counter < 2; counter++) {
1828 subtable = header->fsf_status_qual.halfword[counter * 2];
1829 rule = header->fsf_status_qual.halfword[counter * 2 + 1];
1830 switch (subtable) {
1831 case FSF_SQ_CFDC_SUBTABLE_OS:
1832 case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
1833 case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
1834 case FSF_SQ_CFDC_SUBTABLE_LUN:
1835 ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
1836 zfcp_act_subtable_type[subtable], rule);
1837 break;
1838 }
1839 }
1840 if (port != NULL)
1841 zfcp_erp_port_access_denied(port, 56, fsf_req);
1842 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1843 break; 1193 break;
1844 1194 case FSF_SBAL_MISMATCH:
1195 /* should never occure, avoided in zfcp_fsf_send_els */
1196 /* fall through */
1845 default: 1197 default:
1846 ZFCP_LOG_NORMAL( 1198 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1847 "bug: An unknown FSF Status was presented "
1848 "(adapter: %s, fsf_status=0x%08x)\n",
1849 zfcp_get_busid_by_adapter(adapter),
1850 header->fsf_status);
1851 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1852 break; 1199 break;
1853 } 1200 }
1854
1855skip_fsfstatus: 1201skip_fsfstatus:
1856 send_els->status = retval;
1857
1858 if (send_els->handler) 1202 if (send_els->handler)
1859 send_els->handler(send_els->handler_data); 1203 send_els->handler(send_els->handler_data);
1204}
1860 1205
1861 return retval; 1206/**
1207 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1208 * @els: pointer to struct zfcp_send_els with data for the command
1209 */
1210int zfcp_fsf_send_els(struct zfcp_send_els *els)
1211{
1212 struct zfcp_fsf_req *req;
1213 struct zfcp_adapter *adapter = els->adapter;
1214 struct fsf_qtcb_bottom_support *bottom;
1215 int ret = -EIO;
1216
1217 if (unlikely(!(atomic_read(&els->port->status) &
1218 ZFCP_STATUS_COMMON_UNBLOCKED)))
1219 return -EBUSY;
1220
1221 spin_lock(&adapter->req_q.lock);
1222 if (!atomic_read(&adapter->req_q.count))
1223 goto out;
1224 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
1225 ZFCP_REQ_AUTO_CLEANUP, NULL);
1226 if (unlikely(IS_ERR(req))) {
1227 ret = PTR_ERR(req);
1228 goto out;
1229 }
1230
1231 ret = zfcp_fsf_setup_sbals(req, els->req, els->resp,
1232 FSF_MAX_SBALS_PER_ELS_REQ);
1233 if (ret)
1234 goto failed_send;
1235
1236 bottom = &req->qtcb->bottom.support;
1237 req->handler = zfcp_fsf_send_els_handler;
1238 bottom->d_id = els->d_id;
1239 bottom->service_class = FSF_CLASS_3;
1240 bottom->timeout = 2 * R_A_TOV;
1241 req->data = els;
1242
1243 zfcp_san_dbf_event_els_request(req);
1244
1245 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1246 ret = zfcp_fsf_req_send(req);
1247 if (ret)
1248 goto failed_send;
1249
1250 goto out;
1251
1252failed_send:
1253 zfcp_fsf_req_free(req);
1254out:
1255 spin_unlock(&adapter->req_q.lock);
1256 return ret;
1862} 1257}
1863 1258
1864int 1259int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1865zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1866{ 1260{
1867 volatile struct qdio_buffer_element *sbale; 1261 volatile struct qdio_buffer_element *sbale;
1868 struct zfcp_fsf_req *fsf_req; 1262 struct zfcp_fsf_req *req;
1869 struct zfcp_adapter *adapter = erp_action->adapter; 1263 struct zfcp_adapter *adapter = erp_action->adapter;
1870 unsigned long lock_flags; 1264 int retval = -EIO;
1871 int retval; 1265
1872 1266 spin_lock(&adapter->req_q.lock);
1873 /* setup new FSF request */ 1267 if (!atomic_read(&adapter->req_q.count))
1874 retval = zfcp_fsf_req_create(adapter, 1268 goto out;
1875 FSF_QTCB_EXCHANGE_CONFIG_DATA, 1269 req = zfcp_fsf_req_create(adapter,
1876 ZFCP_REQ_AUTO_CLEANUP, 1270 FSF_QTCB_EXCHANGE_CONFIG_DATA,
1877 adapter->pool.fsf_req_erp, 1271 ZFCP_REQ_AUTO_CLEANUP,
1878 &lock_flags, &fsf_req); 1272 adapter->pool.fsf_req_erp);
1879 if (retval) { 1273 if (unlikely(IS_ERR(req))) {
1880 ZFCP_LOG_INFO("error: Could not create exchange configuration " 1274 retval = PTR_ERR(req);
1881 "data request for adapter %s.\n", 1275 goto out;
1882 zfcp_get_busid_by_adapter(adapter));
1883 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
1884 lock_flags);
1885 return retval;
1886 } 1276 }
1887 1277
1888 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1278 sbale = zfcp_qdio_sbale_req(req);
1889 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1279 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1890 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1280 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1891 1281
1892 fsf_req->qtcb->bottom.config.feature_selection = 1282 req->qtcb->bottom.config.feature_selection =
1893 FSF_FEATURE_CFDC | 1283 FSF_FEATURE_CFDC |
1894 FSF_FEATURE_LUN_SHARING | 1284 FSF_FEATURE_LUN_SHARING |
1895 FSF_FEATURE_NOTIFICATION_LOST | 1285 FSF_FEATURE_NOTIFICATION_LOST |
1896 FSF_FEATURE_UPDATE_ALERT; 1286 FSF_FEATURE_UPDATE_ALERT;
1897 fsf_req->erp_action = erp_action; 1287 req->erp_action = erp_action;
1898 erp_action->fsf_req = fsf_req; 1288 req->handler = zfcp_fsf_exchange_config_data_handler;
1289 erp_action->fsf_req = req;
1899 1290
1900 zfcp_erp_start_timer(fsf_req); 1291 zfcp_fsf_start_erp_timer(req);
1901 retval = zfcp_fsf_req_send(fsf_req); 1292 retval = zfcp_fsf_req_send(req);
1902 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
1903 lock_flags);
1904 if (retval) { 1293 if (retval) {
1905 ZFCP_LOG_INFO("error: Could not send exchange configuration " 1294 zfcp_fsf_req_free(req);
1906 "data command on the adapter %s\n",
1907 zfcp_get_busid_by_adapter(adapter));
1908 zfcp_fsf_req_free(fsf_req);
1909 erp_action->fsf_req = NULL; 1295 erp_action->fsf_req = NULL;
1910 } 1296 }
1911 else 1297out:
1912 ZFCP_LOG_DEBUG("exchange configuration data request initiated " 1298 spin_unlock(&adapter->req_q.lock);
1913 "(adapter %s)\n",
1914 zfcp_get_busid_by_adapter(adapter));
1915
1916 return retval; 1299 return retval;
1917} 1300}
1918 1301
1919int 1302int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
1920zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, 1303 struct fsf_qtcb_bottom_config *data)
1921 struct fsf_qtcb_bottom_config *data)
1922{ 1304{
1923 volatile struct qdio_buffer_element *sbale; 1305 volatile struct qdio_buffer_element *sbale;
1924 struct zfcp_fsf_req *fsf_req; 1306 struct zfcp_fsf_req *req = NULL;
1925 unsigned long lock_flags; 1307 int retval = -EIO;
1926 int retval; 1308
1927 1309 spin_lock(&adapter->req_q.lock);
1928 /* setup new FSF request */ 1310 if (zfcp_fsf_req_sbal_get(adapter))
1929 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1311 goto out;
1930 ZFCP_WAIT_FOR_SBAL, NULL, &lock_flags, 1312
1931 &fsf_req); 1313 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1932 if (retval) { 1314 0, NULL);
1933 ZFCP_LOG_INFO("error: Could not create exchange configuration " 1315 if (unlikely(IS_ERR(req))) {
1934 "data request for adapter %s.\n", 1316 retval = PTR_ERR(req);
1935 zfcp_get_busid_by_adapter(adapter)); 1317 goto out;
1936 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
1937 lock_flags);
1938 return retval;
1939 } 1318 }
1940 1319
1941 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1320 sbale = zfcp_qdio_sbale_req(req);
1942 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1321 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1943 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1322 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1323 req->handler = zfcp_fsf_exchange_config_data_handler;
1944 1324
1945 fsf_req->qtcb->bottom.config.feature_selection = 1325 req->qtcb->bottom.config.feature_selection =
1946 FSF_FEATURE_CFDC | 1326 FSF_FEATURE_CFDC |
1947 FSF_FEATURE_LUN_SHARING | 1327 FSF_FEATURE_LUN_SHARING |
1948 FSF_FEATURE_NOTIFICATION_LOST | 1328 FSF_FEATURE_NOTIFICATION_LOST |
1949 FSF_FEATURE_UPDATE_ALERT; 1329 FSF_FEATURE_UPDATE_ALERT;
1950 1330
1951 if (data) 1331 if (data)
1952 fsf_req->data = (unsigned long) data; 1332 req->data = data;
1953 1333
1954 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT); 1334 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1955 retval = zfcp_fsf_req_send(fsf_req); 1335 retval = zfcp_fsf_req_send(req);
1956 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 1336out:
1957 lock_flags); 1337 spin_unlock(&adapter->req_q.lock);
1958 if (retval) 1338 if (!retval)
1959 ZFCP_LOG_INFO("error: Could not send exchange configuration " 1339 wait_event(req->completion_wq,
1960 "data command on the adapter %s\n", 1340 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
1961 zfcp_get_busid_by_adapter(adapter));
1962 else
1963 wait_event(fsf_req->completion_wq,
1964 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
1965 1341
1966 zfcp_fsf_req_free(fsf_req); 1342 zfcp_fsf_req_free(req);
1967 1343
1968 return retval; 1344 return retval;
1969} 1345}
1970 1346
1971/** 1347/**
1972 * zfcp_fsf_exchange_config_evaluate
1973 * @fsf_req: fsf_req which belongs to xchg config data request
1974 * @xchg_ok: specifies if xchg config data was incomplete or complete (0/1)
1975 *
1976 * returns: -EIO on error, 0 otherwise
1977 */
1978static int
1979zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
1980{
1981 struct fsf_qtcb_bottom_config *bottom;
1982 struct zfcp_adapter *adapter = fsf_req->adapter;
1983 struct Scsi_Host *shost = adapter->scsi_host;
1984
1985 bottom = &fsf_req->qtcb->bottom.config;
1986 ZFCP_LOG_DEBUG("low/high QTCB version 0x%x/0x%x of FSF\n",
1987 bottom->low_qtcb_version, bottom->high_qtcb_version);
1988 adapter->fsf_lic_version = bottom->lic_version;
1989 adapter->adapter_features = bottom->adapter_features;
1990 adapter->connection_features = bottom->connection_features;
1991 adapter->peer_wwpn = 0;
1992 adapter->peer_wwnn = 0;
1993 adapter->peer_d_id = 0;
1994
1995 if (xchg_ok) {
1996
1997 if (fsf_req->data)
1998 memcpy((struct fsf_qtcb_bottom_config *) fsf_req->data,
1999 bottom, sizeof (struct fsf_qtcb_bottom_config));
2000
2001 fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
2002 fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
2003 fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
2004 fc_host_speed(shost) = bottom->fc_link_speed;
2005 fc_host_supported_classes(shost) =
2006 FC_COS_CLASS2 | FC_COS_CLASS3;
2007 adapter->hydra_version = bottom->adapter_type;
2008 if (fc_host_permanent_port_name(shost) == -1)
2009 fc_host_permanent_port_name(shost) =
2010 fc_host_port_name(shost);
2011 if (bottom->fc_topology == FSF_TOPO_P2P) {
2012 adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
2013 adapter->peer_wwpn = bottom->plogi_payload.wwpn;
2014 adapter->peer_wwnn = bottom->plogi_payload.wwnn;
2015 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
2016 } else if (bottom->fc_topology == FSF_TOPO_FABRIC)
2017 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
2018 else if (bottom->fc_topology == FSF_TOPO_AL)
2019 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
2020 else
2021 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
2022 } else {
2023 fc_host_node_name(shost) = 0;
2024 fc_host_port_name(shost) = 0;
2025 fc_host_port_id(shost) = 0;
2026 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
2027 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
2028 adapter->hydra_version = 0;
2029 }
2030
2031 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
2032 adapter->hardware_version = bottom->hardware_version;
2033 memcpy(fc_host_serial_number(shost), bottom->serial_number,
2034 min(FC_SERIAL_NUMBER_SIZE, 17));
2035 EBCASC(fc_host_serial_number(shost),
2036 min(FC_SERIAL_NUMBER_SIZE, 17));
2037 }
2038
2039 if (fsf_req->erp_action)
2040 ZFCP_LOG_NORMAL("The adapter %s reported the following "
2041 "characteristics:\n"
2042 "WWNN 0x%016Lx, WWPN 0x%016Lx, "
2043 "S_ID 0x%06x,\n"
2044 "adapter version 0x%x, "
2045 "LIC version 0x%x, "
2046 "FC link speed %d Gb/s\n",
2047 zfcp_get_busid_by_adapter(adapter),
2048 (wwn_t) fc_host_node_name(shost),
2049 (wwn_t) fc_host_port_name(shost),
2050 fc_host_port_id(shost),
2051 adapter->hydra_version,
2052 adapter->fsf_lic_version,
2053 fc_host_speed(shost));
2054 if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
2055 ZFCP_LOG_NORMAL("error: the adapter %s "
2056 "only supports newer control block "
2057 "versions in comparison to this device "
2058 "driver (try updated device driver)\n",
2059 zfcp_get_busid_by_adapter(adapter));
2060 zfcp_erp_adapter_shutdown(adapter, 0, 125, fsf_req);
2061 return -EIO;
2062 }
2063 if (ZFCP_QTCB_VERSION > bottom->high_qtcb_version) {
2064 ZFCP_LOG_NORMAL("error: the adapter %s "
2065 "only supports older control block "
2066 "versions than this device driver uses"
2067 "(consider a microcode upgrade)\n",
2068 zfcp_get_busid_by_adapter(adapter));
2069 zfcp_erp_adapter_shutdown(adapter, 0, 126, fsf_req);
2070 return -EIO;
2071 }
2072 return 0;
2073}
2074
2075/**
2076 * function: zfcp_fsf_exchange_config_data_handler
2077 *
2078 * purpose: is called for finished Exchange Configuration Data command
2079 *
2080 * returns:
2081 */
2082static int
2083zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2084{
2085 struct fsf_qtcb_bottom_config *bottom;
2086 struct zfcp_adapter *adapter = fsf_req->adapter;
2087 struct fsf_qtcb *qtcb = fsf_req->qtcb;
2088
2089 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2090 return -EIO;
2091
2092 switch (qtcb->header.fsf_status) {
2093
2094 case FSF_GOOD:
2095 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1))
2096 return -EIO;
2097
2098 switch (fc_host_port_type(adapter->scsi_host)) {
2099 case FC_PORTTYPE_PTP:
2100 ZFCP_LOG_NORMAL("Point-to-Point fibrechannel "
2101 "configuration detected at adapter %s\n"
2102 "Peer WWNN 0x%016llx, "
2103 "peer WWPN 0x%016llx, "
2104 "peer d_id 0x%06x\n",
2105 zfcp_get_busid_by_adapter(adapter),
2106 adapter->peer_wwnn,
2107 adapter->peer_wwpn,
2108 adapter->peer_d_id);
2109 break;
2110 case FC_PORTTYPE_NLPORT:
2111 ZFCP_LOG_NORMAL("error: Arbitrated loop fibrechannel "
2112 "topology detected at adapter %s "
2113 "unsupported, shutting down adapter\n",
2114 zfcp_get_busid_by_adapter(adapter));
2115 zfcp_erp_adapter_shutdown(adapter, 0, 127, fsf_req);
2116 return -EIO;
2117 case FC_PORTTYPE_NPORT:
2118 if (fsf_req->erp_action)
2119 ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
2120 "network detected at adapter "
2121 "%s.\n",
2122 zfcp_get_busid_by_adapter(adapter));
2123 break;
2124 default:
2125 ZFCP_LOG_NORMAL("bug: The fibrechannel topology "
2126 "reported by the exchange "
2127 "configuration command for "
2128 "the adapter %s is not "
2129 "of a type known to the zfcp "
2130 "driver, shutting down adapter\n",
2131 zfcp_get_busid_by_adapter(adapter));
2132 zfcp_erp_adapter_shutdown(adapter, 0, 128, fsf_req);
2133 return -EIO;
2134 }
2135 bottom = &qtcb->bottom.config;
2136 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
2137 ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) "
2138 "allowed by the adapter %s "
2139 "is lower than the minimum "
2140 "required by the driver (%ld bytes).\n",
2141 bottom->max_qtcb_size,
2142 zfcp_get_busid_by_adapter(adapter),
2143 sizeof(struct fsf_qtcb));
2144 zfcp_erp_adapter_shutdown(adapter, 0, 129, fsf_req);
2145 return -EIO;
2146 }
2147 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
2148 &adapter->status);
2149 break;
2150 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
2151 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0))
2152 return -EIO;
2153
2154 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
2155 &adapter->status);
2156
2157 zfcp_fsf_link_down_info_eval(fsf_req, 42,
2158 &qtcb->header.fsf_status_qual.link_down_info);
2159 break;
2160 default:
2161 zfcp_erp_adapter_shutdown(adapter, 0, 130, fsf_req);
2162 return -EIO;
2163 }
2164 return 0;
2165}
2166
2167/**
2168 * zfcp_fsf_exchange_port_data - request information about local port 1348 * zfcp_fsf_exchange_port_data - request information about local port
2169 * @erp_action: ERP action for the adapter for which port data is requested 1349 * @erp_action: ERP action for the adapter for which port data is requested
1350 * Returns: 0 on success, error otherwise
2170 */ 1351 */
2171int 1352int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
2172zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
2173{ 1353{
2174 volatile struct qdio_buffer_element *sbale; 1354 volatile struct qdio_buffer_element *sbale;
2175 struct zfcp_fsf_req *fsf_req; 1355 struct zfcp_fsf_req *req;
2176 struct zfcp_adapter *adapter = erp_action->adapter; 1356 struct zfcp_adapter *adapter = erp_action->adapter;
2177 unsigned long lock_flags; 1357 int retval = -EIO;
2178 int retval;
2179 1358
2180 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) { 1359 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
2181 ZFCP_LOG_INFO("error: exchange port data "
2182 "command not supported by adapter %s\n",
2183 zfcp_get_busid_by_adapter(adapter));
2184 return -EOPNOTSUPP; 1360 return -EOPNOTSUPP;
2185 }
2186 1361
2187 /* setup new FSF request */ 1362 spin_lock(&adapter->req_q.lock);
2188 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 1363 if (!atomic_read(&adapter->req_q.count))
2189 ZFCP_REQ_AUTO_CLEANUP, 1364 goto out;
2190 adapter->pool.fsf_req_erp, 1365 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
2191 &lock_flags, &fsf_req); 1366 ZFCP_REQ_AUTO_CLEANUP,
2192 if (retval) { 1367 adapter->pool.fsf_req_erp);
2193 ZFCP_LOG_INFO("error: Out of resources. Could not create an " 1368 if (unlikely(IS_ERR(req))) {
2194 "exchange port data request for " 1369 retval = PTR_ERR(req);
2195 "the adapter %s.\n", 1370 goto out;
2196 zfcp_get_busid_by_adapter(adapter));
2197 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
2198 lock_flags);
2199 return retval;
2200 } 1371 }
2201 1372
2202 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1373 sbale = zfcp_qdio_sbale_req(req);
2203 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1374 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2204 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1375 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2205 1376
2206 erp_action->fsf_req = fsf_req; 1377 req->handler = zfcp_fsf_exchange_port_data_handler;
2207 fsf_req->erp_action = erp_action; 1378 req->erp_action = erp_action;
2208 zfcp_erp_start_timer(fsf_req); 1379 erp_action->fsf_req = req;
2209
2210 retval = zfcp_fsf_req_send(fsf_req);
2211 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
2212 1380
1381 zfcp_fsf_start_erp_timer(req);
1382 retval = zfcp_fsf_req_send(req);
2213 if (retval) { 1383 if (retval) {
2214 ZFCP_LOG_INFO("error: Could not send an exchange port data " 1384 zfcp_fsf_req_free(req);
2215 "command on the adapter %s\n",
2216 zfcp_get_busid_by_adapter(adapter));
2217 zfcp_fsf_req_free(fsf_req);
2218 erp_action->fsf_req = NULL; 1385 erp_action->fsf_req = NULL;
2219 } 1386 }
2220 else 1387out:
2221 ZFCP_LOG_DEBUG("exchange port data request initiated " 1388 spin_unlock(&adapter->req_q.lock);
2222 "(adapter %s)\n",
2223 zfcp_get_busid_by_adapter(adapter));
2224 return retval; 1389 return retval;
2225} 1390}
2226 1391
2227
2228/** 1392/**
2229 * zfcp_fsf_exchange_port_data_sync - request information about local port 1393 * zfcp_fsf_exchange_port_data_sync - request information about local port
2230 * and wait until information is ready 1394 * @adapter: pointer to struct zfcp_adapter
1395 * @data: pointer to struct fsf_qtcb_bottom_port
1396 * Returns: 0 on success, error otherwise
2231 */ 1397 */
2232int 1398int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
2233zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, 1399 struct fsf_qtcb_bottom_port *data)
2234 struct fsf_qtcb_bottom_port *data)
2235{ 1400{
2236 volatile struct qdio_buffer_element *sbale; 1401 volatile struct qdio_buffer_element *sbale;
2237 struct zfcp_fsf_req *fsf_req; 1402 struct zfcp_fsf_req *req = NULL;
2238 unsigned long lock_flags; 1403 int retval = -EIO;
2239 int retval; 1404
2240 1405 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
2241 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
2242 ZFCP_LOG_INFO("error: exchange port data "
2243 "command not supported by adapter %s\n",
2244 zfcp_get_busid_by_adapter(adapter));
2245 return -EOPNOTSUPP; 1406 return -EOPNOTSUPP;
2246 }
2247 1407
2248 /* setup new FSF request */ 1408 spin_lock(&adapter->req_q.lock);
2249 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 1409 if (!atomic_read(&adapter->req_q.count))
2250 0, NULL, &lock_flags, &fsf_req); 1410 goto out;
2251 if (retval) { 1411
2252 ZFCP_LOG_INFO("error: Out of resources. Could not create an " 1412 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
2253 "exchange port data request for " 1413 NULL);
2254 "the adapter %s.\n", 1414 if (unlikely(IS_ERR(req))) {
2255 zfcp_get_busid_by_adapter(adapter)); 1415 retval = PTR_ERR(req);
2256 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 1416 goto out;
2257 lock_flags);
2258 return retval;
2259 } 1417 }
2260 1418
2261 if (data) 1419 if (data)
2262 fsf_req->data = (unsigned long) data; 1420 req->data = data;
2263 1421
2264 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1422 sbale = zfcp_qdio_sbale_req(req);
2265 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1423 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2266 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1424 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2267 1425
2268 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT); 1426 req->handler = zfcp_fsf_exchange_port_data_handler;
2269 retval = zfcp_fsf_req_send(fsf_req); 1427 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2270 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); 1428 retval = zfcp_fsf_req_send(req);
2271 1429out:
2272 if (retval) 1430 spin_unlock(&adapter->req_q.lock);
2273 ZFCP_LOG_INFO("error: Could not send an exchange port data " 1431 if (!retval)
2274 "command on the adapter %s\n", 1432 wait_event(req->completion_wq,
2275 zfcp_get_busid_by_adapter(adapter)); 1433 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
2276 else 1434 zfcp_fsf_req_free(req);
2277 wait_event(fsf_req->completion_wq,
2278 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
2279
2280 zfcp_fsf_req_free(fsf_req);
2281
2282 return retval;
2283}
2284
2285/**
2286 * zfcp_fsf_exchange_port_evaluate
2287 * @fsf_req: fsf_req which belongs to xchg port data request
2288 * @xchg_ok: specifies if xchg port data was incomplete or complete (0/1)
2289 */
2290static void
2291zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2292{
2293 struct zfcp_adapter *adapter;
2294 struct fsf_qtcb_bottom_port *bottom;
2295 struct Scsi_Host *shost;
2296
2297 adapter = fsf_req->adapter;
2298 bottom = &fsf_req->qtcb->bottom.port;
2299 shost = adapter->scsi_host;
2300
2301 if (fsf_req->data)
2302 memcpy((struct fsf_qtcb_bottom_port*) fsf_req->data, bottom,
2303 sizeof(struct fsf_qtcb_bottom_port));
2304
2305 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
2306 fc_host_permanent_port_name(shost) = bottom->wwpn;
2307 else
2308 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
2309 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
2310 fc_host_supported_speeds(shost) = bottom->supported_speed;
2311}
2312
2313/**
2314 * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request
2315 * @fsf_req: pointer to struct zfcp_fsf_req
2316 */
2317static void
2318zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req)
2319{
2320 struct zfcp_adapter *adapter;
2321 struct fsf_qtcb *qtcb;
2322
2323 adapter = fsf_req->adapter;
2324 qtcb = fsf_req->qtcb;
2325
2326 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2327 return;
2328
2329 switch (qtcb->header.fsf_status) {
2330 case FSF_GOOD:
2331 zfcp_fsf_exchange_port_evaluate(fsf_req, 1);
2332 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2333 break;
2334 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
2335 zfcp_fsf_exchange_port_evaluate(fsf_req, 0);
2336 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2337 zfcp_fsf_link_down_info_eval(fsf_req, 43,
2338 &qtcb->header.fsf_status_qual.link_down_info);
2339 break;
2340 }
2341}
2342
2343
2344/*
2345 * function: zfcp_fsf_open_port
2346 *
2347 * purpose:
2348 *
2349 * returns: address of initiated FSF request
2350 * NULL - request could not be initiated
2351 */
2352int
2353zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2354{
2355 volatile struct qdio_buffer_element *sbale;
2356 struct zfcp_fsf_req *fsf_req;
2357 unsigned long lock_flags;
2358 int retval = 0;
2359
2360 /* setup new FSF request */
2361 retval = zfcp_fsf_req_create(erp_action->adapter,
2362 FSF_QTCB_OPEN_PORT_WITH_DID,
2363 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2364 erp_action->adapter->pool.fsf_req_erp,
2365 &lock_flags, &fsf_req);
2366 if (retval < 0) {
2367 ZFCP_LOG_INFO("error: Could not create open port request "
2368 "for port 0x%016Lx on adapter %s.\n",
2369 erp_action->port->wwpn,
2370 zfcp_get_busid_by_adapter(erp_action->adapter));
2371 goto out;
2372 }
2373
2374 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2375 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2376 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2377
2378 fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id;
2379 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
2380 fsf_req->data = (unsigned long) erp_action->port;
2381 fsf_req->erp_action = erp_action;
2382 erp_action->fsf_req = fsf_req;
2383
2384 zfcp_erp_start_timer(fsf_req);
2385 retval = zfcp_fsf_req_send(fsf_req);
2386 if (retval) {
2387 ZFCP_LOG_INFO("error: Could not send open port request for "
2388 "port 0x%016Lx on adapter %s.\n",
2389 erp_action->port->wwpn,
2390 zfcp_get_busid_by_adapter(erp_action->adapter));
2391 zfcp_fsf_req_free(fsf_req);
2392 erp_action->fsf_req = NULL;
2393 goto out;
2394 }
2395 1435
2396 ZFCP_LOG_DEBUG("open port request initiated "
2397 "(adapter %s, port 0x%016Lx)\n",
2398 zfcp_get_busid_by_adapter(erp_action->adapter),
2399 erp_action->port->wwpn);
2400 out:
2401 write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
2402 lock_flags);
2403 return retval; 1436 return retval;
2404} 1437}
2405 1438
2406/* 1439static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
2407 * function: zfcp_fsf_open_port_handler
2408 *
2409 * purpose: is called for finished Open Port command
2410 *
2411 * returns:
2412 */
2413static int
2414zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req)
2415{ 1440{
2416 int retval = -EINVAL; 1441 struct zfcp_port *port = req->data;
2417 struct zfcp_port *port; 1442 struct fsf_qtcb_header *header = &req->qtcb->header;
2418 struct fsf_plogi *plogi; 1443 struct fsf_plogi *plogi;
2419 struct fsf_qtcb_header *header;
2420 u16 subtable, rule, counter;
2421 1444
2422 port = (struct zfcp_port *) fsf_req->data; 1445 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2423 header = &fsf_req->qtcb->header;
2424
2425 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
2426 /* don't change port status in our bookkeeping */
2427 goto skip_fsfstatus; 1446 goto skip_fsfstatus;
2428 }
2429 1447
2430 /* evaluate FSF status in QTCB */
2431 switch (header->fsf_status) { 1448 switch (header->fsf_status) {
2432
2433 case FSF_PORT_ALREADY_OPEN: 1449 case FSF_PORT_ALREADY_OPEN:
2434 ZFCP_LOG_NORMAL("bug: remote port 0x%016Lx on adapter %s "
2435 "is already open.\n",
2436 port->wwpn, zfcp_get_busid_by_port(port));
2437 /*
2438 * This is a bug, however operation should continue normally
2439 * if it is simply ignored
2440 */
2441 break; 1450 break;
2442
2443 case FSF_ACCESS_DENIED: 1451 case FSF_ACCESS_DENIED:
2444 ZFCP_LOG_NORMAL("Access denied, cannot open port 0x%016Lx " 1452 zfcp_fsf_access_denied_port(req, port);
2445 "on adapter %s\n",
2446 port->wwpn, zfcp_get_busid_by_port(port));
2447 for (counter = 0; counter < 2; counter++) {
2448 subtable = header->fsf_status_qual.halfword[counter * 2];
2449 rule = header->fsf_status_qual.halfword[counter * 2 + 1];
2450 switch (subtable) {
2451 case FSF_SQ_CFDC_SUBTABLE_OS:
2452 case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
2453 case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
2454 case FSF_SQ_CFDC_SUBTABLE_LUN:
2455 ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
2456 zfcp_act_subtable_type[subtable], rule);
2457 break;
2458 }
2459 }
2460 zfcp_erp_port_access_denied(port, 57, fsf_req);
2461 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2462 break; 1453 break;
2463
2464 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1454 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
2465 ZFCP_LOG_INFO("error: The FSF adapter is out of resources. " 1455 dev_warn(&req->adapter->ccw_device->dev,
2466 "The remote port 0x%016Lx on adapter %s " 1456 "The adapter is out of resources. The remote port "
2467 "could not be opened. Disabling it.\n", 1457 "0x%016Lx could not be opened, disabling it.\n",
2468 port->wwpn, zfcp_get_busid_by_port(port)); 1458 port->wwpn);
2469 zfcp_erp_port_failed(port, 31, fsf_req); 1459 zfcp_erp_port_failed(port, 31, req);
2470 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1460 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2471 break; 1461 break;
2472
2473 case FSF_ADAPTER_STATUS_AVAILABLE: 1462 case FSF_ADAPTER_STATUS_AVAILABLE:
2474 switch (header->fsf_status_qual.word[0]) { 1463 switch (header->fsf_status_qual.word[0]) {
2475 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1464 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2476 /* ERP strategy will escalate */
2477 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2478 break;
2479 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1465 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2480 /* ERP strategy will escalate */ 1466 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2481 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2482 break; 1467 break;
2483 case FSF_SQ_NO_RETRY_POSSIBLE: 1468 case FSF_SQ_NO_RETRY_POSSIBLE:
2484 ZFCP_LOG_NORMAL("The remote port 0x%016Lx on " 1469 dev_warn(&req->adapter->ccw_device->dev,
2485 "adapter %s could not be opened. " 1470 "The remote port 0x%016Lx could not be "
2486 "Disabling it.\n", 1471 "opened. Disabling it.\n", port->wwpn);
2487 port->wwpn, 1472 zfcp_erp_port_failed(port, 32, req);
2488 zfcp_get_busid_by_port(port)); 1473 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2489 zfcp_erp_port_failed(port, 32, fsf_req);
2490 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2491 break;
2492 default:
2493 ZFCP_LOG_NORMAL
2494 ("bug: Wrong status qualifier 0x%x arrived.\n",
2495 header->fsf_status_qual.word[0]);
2496 break; 1474 break;
2497 } 1475 }
2498 break; 1476 break;
2499
2500 case FSF_GOOD: 1477 case FSF_GOOD:
2501 /* save port handle assigned by FSF */
2502 port->handle = header->port_handle; 1478 port->handle = header->port_handle;
2503 ZFCP_LOG_INFO("The remote port 0x%016Lx via adapter %s "
2504 "was opened, it's port handle is 0x%x\n",
2505 port->wwpn, zfcp_get_busid_by_port(port),
2506 port->handle);
2507 /* mark port as open */
2508 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN | 1479 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
2509 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1480 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
2510 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1481 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
2511 ZFCP_STATUS_COMMON_ACCESS_BOXED, 1482 ZFCP_STATUS_COMMON_ACCESS_BOXED,
2512 &port->status); 1483 &port->status);
2513 retval = 0;
2514 /* check whether D_ID has changed during open */ 1484 /* check whether D_ID has changed during open */
2515 /* 1485 /*
2516 * FIXME: This check is not airtight, as the FCP channel does 1486 * FIXME: This check is not airtight, as the FCP channel does
@@ -2526,320 +1496,168 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req)
2526 * another GID_PN straight after a port has been opened. 1496 * another GID_PN straight after a port has been opened.
2527 * Alternately, an ADISC/PDISC ELS should suffice, as well. 1497 * Alternately, an ADISC/PDISC ELS should suffice, as well.
2528 */ 1498 */
2529 plogi = (struct fsf_plogi *) fsf_req->qtcb->bottom.support.els; 1499 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN)
2530 if (!atomic_test_mask(ZFCP_STATUS_PORT_NO_WWPN, &port->status)) 1500 break;
2531 { 1501
2532 if (fsf_req->qtcb->bottom.support.els1_length < 1502 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
2533 sizeof (struct fsf_plogi)) { 1503 if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) {
2534 ZFCP_LOG_INFO( 1504 if (plogi->serv_param.wwpn != port->wwpn)
2535 "warning: insufficient length of " 1505 atomic_clear_mask(ZFCP_STATUS_PORT_DID_DID,
2536 "PLOGI payload (%i)\n", 1506 &port->status);
2537 fsf_req->qtcb->bottom.support.els1_length); 1507 else {
2538 /* skip sanity check and assume wwpn is ok */ 1508 port->wwnn = plogi->serv_param.wwnn;
2539 } else { 1509 zfcp_fc_plogi_evaluate(port, plogi);
2540 if (plogi->serv_param.wwpn != port->wwpn) {
2541 ZFCP_LOG_INFO("warning: d_id of port "
2542 "0x%016Lx changed during "
2543 "open\n", port->wwpn);
2544 atomic_clear_mask(
2545 ZFCP_STATUS_PORT_DID_DID,
2546 &port->status);
2547 } else {
2548 port->wwnn = plogi->serv_param.wwnn;
2549 zfcp_plogi_evaluate(port, plogi);
2550 }
2551 } 1510 }
2552 } 1511 }
2553 break; 1512 break;
2554
2555 case FSF_UNKNOWN_OP_SUBTYPE: 1513 case FSF_UNKNOWN_OP_SUBTYPE:
2556 /* should never occure, subtype not set in zfcp_fsf_open_port */ 1514 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2557 ZFCP_LOG_INFO("unknown operation subtype (adapter: %s, "
2558 "op_subtype=0x%x)\n",
2559 zfcp_get_busid_by_port(port),
2560 fsf_req->qtcb->bottom.support.operation_subtype);
2561 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2562 break;
2563
2564 default:
2565 ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
2566 "(debug info 0x%x)\n",
2567 header->fsf_status);
2568 break; 1515 break;
2569 } 1516 }
2570 1517
2571 skip_fsfstatus: 1518skip_fsfstatus:
2572 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &port->status); 1519 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &port->status);
2573 return retval;
2574} 1520}
2575 1521
2576/* 1522/**
2577 * function: zfcp_fsf_close_port 1523 * zfcp_fsf_open_port - create and send open port request
2578 * 1524 * @erp_action: pointer to struct zfcp_erp_action
2579 * purpose: submit FSF command "close port" 1525 * Returns: 0 on success, error otherwise
2580 *
2581 * returns: address of initiated FSF request
2582 * NULL - request could not be initiated
2583 */ 1526 */
2584int 1527int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2585zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2586{ 1528{
2587 volatile struct qdio_buffer_element *sbale; 1529 volatile struct qdio_buffer_element *sbale;
2588 struct zfcp_fsf_req *fsf_req; 1530 struct zfcp_adapter *adapter = erp_action->adapter;
2589 unsigned long lock_flags; 1531 struct zfcp_fsf_req *req;
2590 int retval = 0; 1532 int retval = -EIO;
2591 1533
2592 /* setup new FSF request */ 1534 spin_lock(&adapter->req_q.lock);
2593 retval = zfcp_fsf_req_create(erp_action->adapter, 1535 if (zfcp_fsf_req_sbal_get(adapter))
2594 FSF_QTCB_CLOSE_PORT, 1536 goto out;
2595 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 1537
2596 erp_action->adapter->pool.fsf_req_erp, 1538 req = zfcp_fsf_req_create(adapter,
2597 &lock_flags, &fsf_req); 1539 FSF_QTCB_OPEN_PORT_WITH_DID,
2598 if (retval < 0) { 1540 ZFCP_REQ_AUTO_CLEANUP,
2599 ZFCP_LOG_INFO("error: Could not create a close port request " 1541 adapter->pool.fsf_req_erp);
2600 "for port 0x%016Lx on adapter %s.\n", 1542 if (unlikely(IS_ERR(req))) {
2601 erp_action->port->wwpn, 1543 retval = PTR_ERR(req);
2602 zfcp_get_busid_by_adapter(erp_action->adapter));
2603 goto out; 1544 goto out;
2604 } 1545 }
2605 1546
2606 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1547 sbale = zfcp_qdio_sbale_req(req);
2607 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1548 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2608 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1549 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2609 1550
2610 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status); 1551 req->handler = zfcp_fsf_open_port_handler;
2611 fsf_req->data = (unsigned long) erp_action->port; 1552 req->qtcb->bottom.support.d_id = erp_action->port->d_id;
2612 fsf_req->erp_action = erp_action; 1553 req->data = erp_action->port;
2613 fsf_req->qtcb->header.port_handle = erp_action->port->handle; 1554 req->erp_action = erp_action;
2614 fsf_req->erp_action = erp_action; 1555 erp_action->fsf_req = req;
2615 erp_action->fsf_req = fsf_req; 1556 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
2616 1557
2617 zfcp_erp_start_timer(fsf_req); 1558 zfcp_fsf_start_erp_timer(req);
2618 retval = zfcp_fsf_req_send(fsf_req); 1559 retval = zfcp_fsf_req_send(req);
2619 if (retval) { 1560 if (retval) {
2620 ZFCP_LOG_INFO("error: Could not send a close port request for " 1561 zfcp_fsf_req_free(req);
2621 "port 0x%016Lx on adapter %s.\n",
2622 erp_action->port->wwpn,
2623 zfcp_get_busid_by_adapter(erp_action->adapter));
2624 zfcp_fsf_req_free(fsf_req);
2625 erp_action->fsf_req = NULL; 1562 erp_action->fsf_req = NULL;
2626 goto out;
2627 } 1563 }
2628 1564out:
2629 ZFCP_LOG_TRACE("close port request initiated " 1565 spin_unlock(&adapter->req_q.lock);
2630 "(adapter %s, port 0x%016Lx)\n",
2631 zfcp_get_busid_by_adapter(erp_action->adapter),
2632 erp_action->port->wwpn);
2633 out:
2634 write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
2635 lock_flags);
2636 return retval; 1566 return retval;
2637} 1567}
2638 1568
2639/* 1569static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
2640 * function: zfcp_fsf_close_port_handler
2641 *
2642 * purpose: is called for finished Close Port FSF command
2643 *
2644 * returns:
2645 */
2646static int
2647zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req)
2648{ 1570{
2649 int retval = -EINVAL; 1571 struct zfcp_port *port = req->data;
2650 struct zfcp_port *port;
2651 1572
2652 port = (struct zfcp_port *) fsf_req->data; 1573 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2653
2654 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
2655 /* don't change port status in our bookkeeping */
2656 goto skip_fsfstatus; 1574 goto skip_fsfstatus;
2657 }
2658
2659 /* evaluate FSF status in QTCB */
2660 switch (fsf_req->qtcb->header.fsf_status) {
2661 1575
1576 switch (req->qtcb->header.fsf_status) {
2662 case FSF_PORT_HANDLE_NOT_VALID: 1577 case FSF_PORT_HANDLE_NOT_VALID:
2663 ZFCP_LOG_INFO("Temporary port identifier 0x%x for port " 1578 zfcp_erp_adapter_reopen(port->adapter, 0, 107, req);
2664 "0x%016Lx on adapter %s invalid. This may happen " 1579 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2665 "occasionally.\n", port->handle,
2666 port->wwpn, zfcp_get_busid_by_port(port));
2667 ZFCP_LOG_DEBUG("status qualifier:\n");
2668 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
2669 (char *) &fsf_req->qtcb->header.fsf_status_qual,
2670 sizeof (union fsf_status_qual));
2671 zfcp_erp_adapter_reopen(port->adapter, 0, 107, fsf_req);
2672 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2673 break; 1580 break;
2674
2675 case FSF_ADAPTER_STATUS_AVAILABLE: 1581 case FSF_ADAPTER_STATUS_AVAILABLE:
2676 /* Note: FSF has actually closed the port in this case.
2677 * The status code is just daft. Fingers crossed for a change
2678 */
2679 retval = 0;
2680 break; 1582 break;
2681
2682 case FSF_GOOD: 1583 case FSF_GOOD:
2683 ZFCP_LOG_TRACE("remote port 0x016%Lx on adapter %s closed, " 1584 zfcp_erp_modify_port_status(port, 33, req,
2684 "port handle 0x%x\n", port->wwpn,
2685 zfcp_get_busid_by_port(port), port->handle);
2686 zfcp_erp_modify_port_status(port, 33, fsf_req,
2687 ZFCP_STATUS_COMMON_OPEN, 1585 ZFCP_STATUS_COMMON_OPEN,
2688 ZFCP_CLEAR); 1586 ZFCP_CLEAR);
2689 retval = 0;
2690 break;
2691
2692 default:
2693 ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
2694 "(debug info 0x%x)\n",
2695 fsf_req->qtcb->header.fsf_status);
2696 break; 1587 break;
2697 } 1588 }
2698 1589
2699 skip_fsfstatus: 1590skip_fsfstatus:
2700 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &port->status); 1591 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &port->status);
2701 return retval;
2702} 1592}
2703 1593
2704/* 1594/**
2705 * function: zfcp_fsf_close_physical_port 1595 * zfcp_fsf_close_port - create and send close port request
2706 * 1596 * @erp_action: pointer to struct zfcp_erp_action
2707 * purpose: submit FSF command "close physical port" 1597 * Returns: 0 on success, error otherwise
2708 *
2709 * returns: address of initiated FSF request
2710 * NULL - request could not be initiated
2711 */ 1598 */
2712int 1599int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2713zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2714{ 1600{
2715 volatile struct qdio_buffer_element *sbale; 1601 volatile struct qdio_buffer_element *sbale;
2716 struct zfcp_fsf_req *fsf_req; 1602 struct zfcp_adapter *adapter = erp_action->adapter;
2717 unsigned long lock_flags; 1603 struct zfcp_fsf_req *req;
2718 int retval = 0; 1604 int retval = -EIO;
2719
2720 /* setup new FSF request */
2721 retval = zfcp_fsf_req_create(erp_action->adapter,
2722 FSF_QTCB_CLOSE_PHYSICAL_PORT,
2723 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2724 erp_action->adapter->pool.fsf_req_erp,
2725 &lock_flags, &fsf_req);
2726 if (retval < 0) {
2727 ZFCP_LOG_INFO("error: Could not create close physical port "
2728 "request (adapter %s, port 0x%016Lx)\n",
2729 zfcp_get_busid_by_adapter(erp_action->adapter),
2730 erp_action->port->wwpn);
2731 1605
1606 spin_lock(&adapter->req_q.lock);
1607 if (zfcp_fsf_req_sbal_get(adapter))
1608 goto out;
1609
1610 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
1611 ZFCP_REQ_AUTO_CLEANUP,
1612 adapter->pool.fsf_req_erp);
1613 if (unlikely(IS_ERR(req))) {
1614 retval = PTR_ERR(req);
2732 goto out; 1615 goto out;
2733 } 1616 }
2734 1617
2735 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1618 sbale = zfcp_qdio_sbale_req(req);
2736 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1619 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2737 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1620 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2738 1621
2739 /* mark port as being closed */ 1622 req->handler = zfcp_fsf_close_port_handler;
2740 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, 1623 req->data = erp_action->port;
2741 &erp_action->port->status); 1624 req->erp_action = erp_action;
2742 /* save a pointer to this port */ 1625 req->qtcb->header.port_handle = erp_action->port->handle;
2743 fsf_req->data = (unsigned long) erp_action->port; 1626 erp_action->fsf_req = req;
2744 fsf_req->qtcb->header.port_handle = erp_action->port->handle; 1627 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
2745 fsf_req->erp_action = erp_action; 1628
2746 erp_action->fsf_req = fsf_req; 1629 zfcp_fsf_start_erp_timer(req);
2747 1630 retval = zfcp_fsf_req_send(req);
2748 zfcp_erp_start_timer(fsf_req);
2749 retval = zfcp_fsf_req_send(fsf_req);
2750 if (retval) { 1631 if (retval) {
2751 ZFCP_LOG_INFO("error: Could not send close physical port " 1632 zfcp_fsf_req_free(req);
2752 "request (adapter %s, port 0x%016Lx)\n",
2753 zfcp_get_busid_by_adapter(erp_action->adapter),
2754 erp_action->port->wwpn);
2755 zfcp_fsf_req_free(fsf_req);
2756 erp_action->fsf_req = NULL; 1633 erp_action->fsf_req = NULL;
2757 goto out;
2758 } 1634 }
2759 1635out:
2760 ZFCP_LOG_TRACE("close physical port request initiated " 1636 spin_unlock(&adapter->req_q.lock);
2761 "(adapter %s, port 0x%016Lx)\n",
2762 zfcp_get_busid_by_adapter(erp_action->adapter),
2763 erp_action->port->wwpn);
2764 out:
2765 write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
2766 lock_flags);
2767 return retval; 1637 return retval;
2768} 1638}
2769 1639
2770/* 1640static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
2771 * function: zfcp_fsf_close_physical_port_handler
2772 *
2773 * purpose: is called for finished Close Physical Port FSF command
2774 *
2775 * returns:
2776 */
2777static int
2778zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req)
2779{ 1641{
2780 int retval = -EINVAL; 1642 struct zfcp_port *port = req->data;
2781 struct zfcp_port *port; 1643 struct fsf_qtcb_header *header = &req->qtcb->header;
2782 struct zfcp_unit *unit; 1644 struct zfcp_unit *unit;
2783 struct fsf_qtcb_header *header;
2784 u16 subtable, rule, counter;
2785 1645
2786 port = (struct zfcp_port *) fsf_req->data; 1646 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2787 header = &fsf_req->qtcb->header;
2788
2789 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
2790 /* don't change port status in our bookkeeping */
2791 goto skip_fsfstatus; 1647 goto skip_fsfstatus;
2792 }
2793 1648
2794 /* evaluate FSF status in QTCB */
2795 switch (header->fsf_status) { 1649 switch (header->fsf_status) {
2796
2797 case FSF_PORT_HANDLE_NOT_VALID: 1650 case FSF_PORT_HANDLE_NOT_VALID:
2798 ZFCP_LOG_INFO("Temporary port identifier 0x%x invalid" 1651 zfcp_erp_adapter_reopen(port->adapter, 0, 108, req);
2799 "(adapter %s, port 0x%016Lx). " 1652 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2800 "This may happen occasionally.\n",
2801 port->handle,
2802 zfcp_get_busid_by_port(port),
2803 port->wwpn);
2804 ZFCP_LOG_DEBUG("status qualifier:\n");
2805 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
2806 (char *) &header->fsf_status_qual,
2807 sizeof (union fsf_status_qual));
2808 zfcp_erp_adapter_reopen(port->adapter, 0, 108, fsf_req);
2809 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2810 break; 1653 break;
2811
2812 case FSF_ACCESS_DENIED: 1654 case FSF_ACCESS_DENIED:
2813 ZFCP_LOG_NORMAL("Access denied, cannot close " 1655 zfcp_fsf_access_denied_port(req, port);
2814 "physical port 0x%016Lx on adapter %s\n",
2815 port->wwpn, zfcp_get_busid_by_port(port));
2816 for (counter = 0; counter < 2; counter++) {
2817 subtable = header->fsf_status_qual.halfword[counter * 2];
2818 rule = header->fsf_status_qual.halfword[counter * 2 + 1];
2819 switch (subtable) {
2820 case FSF_SQ_CFDC_SUBTABLE_OS:
2821 case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
2822 case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
2823 case FSF_SQ_CFDC_SUBTABLE_LUN:
2824 ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
2825 zfcp_act_subtable_type[subtable], rule);
2826 break;
2827 }
2828 }
2829 zfcp_erp_port_access_denied(port, 58, fsf_req);
2830 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2831 break; 1656 break;
2832
2833 case FSF_PORT_BOXED: 1657 case FSF_PORT_BOXED:
2834 ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter " 1658 zfcp_erp_port_boxed(port, 50, req);
2835 "%s needs to be reopened but it was attempted " 1659 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2836 "to close it physically.\n", 1660 ZFCP_STATUS_FSFREQ_RETRY;
2837 port->wwpn,
2838 zfcp_get_busid_by_port(port));
2839 zfcp_erp_port_boxed(port, 50, fsf_req);
2840 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2841 ZFCP_STATUS_FSFREQ_RETRY;
2842
2843 /* can't use generic zfcp_erp_modify_port_status because 1661 /* can't use generic zfcp_erp_modify_port_status because
2844 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ 1662 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
2845 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1663 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
@@ -2847,154 +1665,88 @@ zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req)
2847 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1665 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
2848 &unit->status); 1666 &unit->status);
2849 break; 1667 break;
2850
2851 case FSF_ADAPTER_STATUS_AVAILABLE: 1668 case FSF_ADAPTER_STATUS_AVAILABLE:
2852 switch (header->fsf_status_qual.word[0]) { 1669 switch (header->fsf_status_qual.word[0]) {
2853 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1670 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2854 /* This will now be escalated by ERP */ 1671 /* fall through */
2855 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2856 break;
2857 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1672 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2858 /* ERP strategy will escalate */ 1673 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2859 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2860 break;
2861 default:
2862 ZFCP_LOG_NORMAL
2863 ("bug: Wrong status qualifier 0x%x arrived.\n",
2864 header->fsf_status_qual.word[0]);
2865 break; 1674 break;
2866 } 1675 }
2867 break; 1676 break;
2868
2869 case FSF_GOOD: 1677 case FSF_GOOD:
2870 ZFCP_LOG_DEBUG("Remote port 0x%016Lx via adapter %s "
2871 "physically closed, port handle 0x%x\n",
2872 port->wwpn,
2873 zfcp_get_busid_by_port(port), port->handle);
2874 /* can't use generic zfcp_erp_modify_port_status because 1678 /* can't use generic zfcp_erp_modify_port_status because
2875 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port 1679 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
2876 */ 1680 */
2877 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1681 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
2878 list_for_each_entry(unit, &port->unit_list_head, list) 1682 list_for_each_entry(unit, &port->unit_list_head, list)
2879 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 1683 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
2880 retval = 0; 1684 &unit->status);
2881 break;
2882
2883 default:
2884 ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
2885 "(debug info 0x%x)\n",
2886 header->fsf_status);
2887 break; 1685 break;
2888 } 1686 }
2889 1687skip_fsfstatus:
2890 skip_fsfstatus:
2891 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, &port->status); 1688 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, &port->status);
2892 return retval;
2893} 1689}
2894 1690
2895/* 1691/**
2896 * function: zfcp_fsf_open_unit 1692 * zfcp_fsf_close_physical_port - close physical port
2897 * 1693 * @erp_action: pointer to struct zfcp_erp_action
2898 * purpose: 1694 * Returns: 0 on success
2899 *
2900 * returns:
2901 *
2902 * assumptions: This routine does not check whether the associated
2903 * remote port has already been opened. This should be
2904 * done by calling routines. Otherwise some status
2905 * may be presented by FSF
2906 */ 1695 */
2907int 1696int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2908zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2909{ 1697{
2910 volatile struct qdio_buffer_element *sbale; 1698 volatile struct qdio_buffer_element *sbale;
2911 struct zfcp_fsf_req *fsf_req; 1699 struct zfcp_adapter *adapter = erp_action->adapter;
2912 unsigned long lock_flags; 1700 struct zfcp_fsf_req *req;
2913 int retval = 0; 1701 int retval = -EIO;
2914 1702
2915 /* setup new FSF request */ 1703 spin_lock(&adapter->req_q.lock);
2916 retval = zfcp_fsf_req_create(erp_action->adapter, 1704 if (zfcp_fsf_req_sbal_get(adapter))
2917 FSF_QTCB_OPEN_LUN, 1705 goto out;
2918 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 1706
2919 erp_action->adapter->pool.fsf_req_erp, 1707 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT,
2920 &lock_flags, &fsf_req); 1708 ZFCP_REQ_AUTO_CLEANUP,
2921 if (retval < 0) { 1709 adapter->pool.fsf_req_erp);
2922 ZFCP_LOG_INFO("error: Could not create open unit request for " 1710 if (unlikely(IS_ERR(req))) {
2923 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n", 1711 retval = PTR_ERR(req);
2924 erp_action->unit->fcp_lun,
2925 erp_action->unit->port->wwpn,
2926 zfcp_get_busid_by_adapter(erp_action->adapter));
2927 goto out; 1712 goto out;
2928 } 1713 }
2929 1714
2930 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1715 sbale = zfcp_qdio_sbale_req(req);
2931 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1716 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2932 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1717 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2933 1718
2934 fsf_req->qtcb->header.port_handle = erp_action->port->handle; 1719 req->data = erp_action->port;
2935 fsf_req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun; 1720 req->qtcb->header.port_handle = erp_action->port->handle;
2936 if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE)) 1721 req->erp_action = erp_action;
2937 fsf_req->qtcb->bottom.support.option = 1722 req->handler = zfcp_fsf_close_physical_port_handler;
2938 FSF_OPEN_LUN_SUPPRESS_BOXING; 1723 erp_action->fsf_req = req;
2939 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status); 1724 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
2940 fsf_req->data = (unsigned long) erp_action->unit; 1725 &erp_action->port->status);
2941 fsf_req->erp_action = erp_action;
2942 erp_action->fsf_req = fsf_req;
2943 1726
2944 zfcp_erp_start_timer(fsf_req); 1727 zfcp_fsf_start_erp_timer(req);
2945 retval = zfcp_fsf_req_send(erp_action->fsf_req); 1728 retval = zfcp_fsf_req_send(req);
2946 if (retval) { 1729 if (retval) {
2947 ZFCP_LOG_INFO("error: Could not send an open unit request " 1730 zfcp_fsf_req_free(req);
2948 "on the adapter %s, port 0x%016Lx for "
2949 "unit 0x%016Lx\n",
2950 zfcp_get_busid_by_adapter(erp_action->adapter),
2951 erp_action->port->wwpn,
2952 erp_action->unit->fcp_lun);
2953 zfcp_fsf_req_free(fsf_req);
2954 erp_action->fsf_req = NULL; 1731 erp_action->fsf_req = NULL;
2955 goto out;
2956 } 1732 }
2957 1733out:
2958 ZFCP_LOG_TRACE("Open LUN request initiated (adapter %s, " 1734 spin_unlock(&adapter->req_q.lock);
2959 "port 0x%016Lx, unit 0x%016Lx)\n",
2960 zfcp_get_busid_by_adapter(erp_action->adapter),
2961 erp_action->port->wwpn, erp_action->unit->fcp_lun);
2962 out:
2963 write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
2964 lock_flags);
2965 return retval; 1735 return retval;
2966} 1736}
2967 1737
2968/* 1738static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
2969 * function: zfcp_fsf_open_unit_handler
2970 *
2971 * purpose: is called for finished Open LUN command
2972 *
2973 * returns:
2974 */
2975static int
2976zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
2977{ 1739{
2978 int retval = -EINVAL; 1740 struct zfcp_adapter *adapter = req->adapter;
2979 struct zfcp_adapter *adapter; 1741 struct zfcp_unit *unit = req->data;
2980 struct zfcp_unit *unit; 1742 struct fsf_qtcb_header *header = &req->qtcb->header;
2981 struct fsf_qtcb_header *header; 1743 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
2982 struct fsf_qtcb_bottom_support *bottom; 1744 struct fsf_queue_designator *queue_designator =
2983 struct fsf_queue_designator *queue_designator; 1745 &header->fsf_status_qual.fsf_queue_designator;
2984 u16 subtable, rule, counter;
2985 int exclusive, readwrite; 1746 int exclusive, readwrite;
2986 1747
2987 unit = (struct zfcp_unit *) fsf_req->data; 1748 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2988
2989 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
2990 /* don't change unit status in our bookkeeping */
2991 goto skip_fsfstatus; 1749 goto skip_fsfstatus;
2992 }
2993
2994 adapter = fsf_req->adapter;
2995 header = &fsf_req->qtcb->header;
2996 bottom = &fsf_req->qtcb->bottom.support;
2997 queue_designator = &header->fsf_status_qual.fsf_queue_designator;
2998 1750
2999 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1751 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
3000 ZFCP_STATUS_COMMON_ACCESS_BOXED | 1752 ZFCP_STATUS_COMMON_ACCESS_BOXED |
@@ -3002,155 +1754,65 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
3002 ZFCP_STATUS_UNIT_READONLY, 1754 ZFCP_STATUS_UNIT_READONLY,
3003 &unit->status); 1755 &unit->status);
3004 1756
3005 /* evaluate FSF status in QTCB */
3006 switch (header->fsf_status) { 1757 switch (header->fsf_status) {
3007 1758
3008 case FSF_PORT_HANDLE_NOT_VALID: 1759 case FSF_PORT_HANDLE_NOT_VALID:
3009 ZFCP_LOG_INFO("Temporary port identifier 0x%x " 1760 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, req);
3010 "for port 0x%016Lx on adapter %s invalid " 1761 /* fall through */
3011 "This may happen occasionally\n",
3012 unit->port->handle,
3013 unit->port->wwpn, zfcp_get_busid_by_unit(unit));
3014 ZFCP_LOG_DEBUG("status qualifier:\n");
3015 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3016 (char *) &header->fsf_status_qual,
3017 sizeof (union fsf_status_qual));
3018 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, fsf_req);
3019 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3020 break;
3021
3022 case FSF_LUN_ALREADY_OPEN: 1762 case FSF_LUN_ALREADY_OPEN:
3023 ZFCP_LOG_NORMAL("bug: Attempted to open unit 0x%016Lx on "
3024 "remote port 0x%016Lx on adapter %s twice.\n",
3025 unit->fcp_lun,
3026 unit->port->wwpn, zfcp_get_busid_by_unit(unit));
3027 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3028 break; 1763 break;
3029
3030 case FSF_ACCESS_DENIED: 1764 case FSF_ACCESS_DENIED:
3031 ZFCP_LOG_NORMAL("Access denied, cannot open unit 0x%016Lx on " 1765 zfcp_fsf_access_denied_unit(req, unit);
3032 "remote port 0x%016Lx on adapter %s\n",
3033 unit->fcp_lun, unit->port->wwpn,
3034 zfcp_get_busid_by_unit(unit));
3035 for (counter = 0; counter < 2; counter++) {
3036 subtable = header->fsf_status_qual.halfword[counter * 2];
3037 rule = header->fsf_status_qual.halfword[counter * 2 + 1];
3038 switch (subtable) {
3039 case FSF_SQ_CFDC_SUBTABLE_OS:
3040 case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
3041 case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
3042 case FSF_SQ_CFDC_SUBTABLE_LUN:
3043 ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
3044 zfcp_act_subtable_type[subtable], rule);
3045 break;
3046 }
3047 }
3048 zfcp_erp_unit_access_denied(unit, 59, fsf_req);
3049 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); 1766 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
3050 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); 1767 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
3051 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3052 break; 1768 break;
3053
3054 case FSF_PORT_BOXED: 1769 case FSF_PORT_BOXED:
3055 ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s " 1770 zfcp_erp_port_boxed(unit->port, 51, req);
3056 "needs to be reopened\n", 1771 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
3057 unit->port->wwpn, zfcp_get_busid_by_unit(unit)); 1772 ZFCP_STATUS_FSFREQ_RETRY;
3058 zfcp_erp_port_boxed(unit->port, 51, fsf_req);
3059 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
3060 ZFCP_STATUS_FSFREQ_RETRY;
3061 break; 1773 break;
3062
3063 case FSF_LUN_SHARING_VIOLATION: 1774 case FSF_LUN_SHARING_VIOLATION:
3064 if (header->fsf_status_qual.word[0] != 0) { 1775 if (header->fsf_status_qual.word[0])
3065 ZFCP_LOG_NORMAL("FCP-LUN 0x%Lx at the remote port " 1776 dev_warn(&adapter->ccw_device->dev,
3066 "with WWPN 0x%Lx " 1777 "FCP-LUN 0x%Lx at the remote port "
3067 "connected to the adapter %s " 1778 "with WWPN 0x%Lx "
3068 "is already in use in LPAR%d, CSS%d\n", 1779 "connected to the adapter "
3069 unit->fcp_lun, 1780 "is already in use in LPAR%d, CSS%d.\n",
3070 unit->port->wwpn, 1781 unit->fcp_lun,
3071 zfcp_get_busid_by_unit(unit), 1782 unit->port->wwpn,
3072 queue_designator->hla, 1783 queue_designator->hla,
3073 queue_designator->cssid); 1784 queue_designator->cssid);
3074 } else { 1785 else
3075 subtable = header->fsf_status_qual.halfword[4]; 1786 zfcp_act_eval_err(adapter,
3076 rule = header->fsf_status_qual.halfword[5]; 1787 header->fsf_status_qual.word[2]);
3077 switch (subtable) { 1788 zfcp_erp_unit_access_denied(unit, 60, req);
3078 case FSF_SQ_CFDC_SUBTABLE_OS:
3079 case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
3080 case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
3081 case FSF_SQ_CFDC_SUBTABLE_LUN:
3082 ZFCP_LOG_NORMAL("Access to FCP-LUN 0x%Lx at the "
3083 "remote port with WWPN 0x%Lx "
3084 "connected to the adapter %s "
3085 "is denied (%s rule %d)\n",
3086 unit->fcp_lun,
3087 unit->port->wwpn,
3088 zfcp_get_busid_by_unit(unit),
3089 zfcp_act_subtable_type[subtable],
3090 rule);
3091 break;
3092 }
3093 }
3094 ZFCP_LOG_DEBUG("status qualifier:\n");
3095 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3096 (char *) &header->fsf_status_qual,
3097 sizeof (union fsf_status_qual));
3098 zfcp_erp_unit_access_denied(unit, 60, fsf_req);
3099 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); 1789 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
3100 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); 1790 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
3101 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1791 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3102 break; 1792 break;
3103
3104 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: 1793 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
3105 ZFCP_LOG_INFO("error: The adapter ran out of resources. " 1794 dev_warn(&adapter->ccw_device->dev,
3106 "There is no handle (temporary port identifier) " 1795 "The adapter ran out of resources. There is no "
3107 "available for unit 0x%016Lx on port 0x%016Lx " 1796 "handle available for unit 0x%016Lx on port 0x%016Lx.",
3108 "on adapter %s\n", 1797 unit->fcp_lun, unit->port->wwpn);
3109 unit->fcp_lun, 1798 zfcp_erp_unit_failed(unit, 34, req);
3110 unit->port->wwpn, 1799 /* fall through */
3111 zfcp_get_busid_by_unit(unit)); 1800 case FSF_INVALID_COMMAND_OPTION:
3112 zfcp_erp_unit_failed(unit, 34, fsf_req); 1801 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3113 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3114 break; 1802 break;
3115
3116 case FSF_ADAPTER_STATUS_AVAILABLE: 1803 case FSF_ADAPTER_STATUS_AVAILABLE:
3117 switch (header->fsf_status_qual.word[0]) { 1804 switch (header->fsf_status_qual.word[0]) {
3118 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1805 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
3119 /* Re-establish link to port */
3120 zfcp_test_link(unit->port); 1806 zfcp_test_link(unit->port);
3121 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1807 /* fall through */
3122 break;
3123 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1808 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
3124 /* ERP strategy will escalate */ 1809 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3125 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3126 break; 1810 break;
3127 default:
3128 ZFCP_LOG_NORMAL
3129 ("bug: Wrong status qualifier 0x%x arrived.\n",
3130 header->fsf_status_qual.word[0]);
3131 } 1811 }
3132 break; 1812 break;
3133 1813
3134 case FSF_INVALID_COMMAND_OPTION:
3135 ZFCP_LOG_NORMAL(
3136 "Invalid option 0x%x has been specified "
3137 "in QTCB bottom sent to the adapter %s\n",
3138 bottom->option,
3139 zfcp_get_busid_by_adapter(adapter));
3140 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3141 retval = -EINVAL;
3142 break;
3143
3144 case FSF_GOOD: 1814 case FSF_GOOD:
3145 /* save LUN handle assigned by FSF */
3146 unit->handle = header->lun_handle; 1815 unit->handle = header->lun_handle;
3147 ZFCP_LOG_TRACE("unit 0x%016Lx on remote port 0x%016Lx on "
3148 "adapter %s opened, port handle 0x%x\n",
3149 unit->fcp_lun,
3150 unit->port->wwpn,
3151 zfcp_get_busid_by_unit(unit),
3152 unit->handle);
3153 /* mark unit as open */
3154 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 1816 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
3155 1817
3156 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) && 1818 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
@@ -3168,1528 +1830,629 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
3168 if (!readwrite) { 1830 if (!readwrite) {
3169 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY, 1831 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
3170 &unit->status); 1832 &unit->status);
3171 ZFCP_LOG_NORMAL("read-only access for unit " 1833 dev_info(&adapter->ccw_device->dev,
3172 "(adapter %s, wwpn=0x%016Lx, " 1834 "Read-only access for unit 0x%016Lx "
3173 "fcp_lun=0x%016Lx)\n", 1835 "on port 0x%016Lx.\n",
3174 zfcp_get_busid_by_unit(unit), 1836 unit->fcp_lun, unit->port->wwpn);
3175 unit->port->wwpn,
3176 unit->fcp_lun);
3177 } 1837 }
3178 1838
3179 if (exclusive && !readwrite) { 1839 if (exclusive && !readwrite) {
3180 ZFCP_LOG_NORMAL("exclusive access of read-only " 1840 dev_err(&adapter->ccw_device->dev,
3181 "unit not supported\n"); 1841 "Exclusive access of read-only unit "
3182 zfcp_erp_unit_failed(unit, 35, fsf_req); 1842 "0x%016Lx on port 0x%016Lx not "
3183 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1843 "supported, disabling unit.\n",
3184 zfcp_erp_unit_shutdown(unit, 0, 80, fsf_req); 1844 unit->fcp_lun, unit->port->wwpn);
1845 zfcp_erp_unit_failed(unit, 35, req);
1846 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1847 zfcp_erp_unit_shutdown(unit, 0, 80, req);
3185 } else if (!exclusive && readwrite) { 1848 } else if (!exclusive && readwrite) {
3186 ZFCP_LOG_NORMAL("shared access of read-write " 1849 dev_err(&adapter->ccw_device->dev,
3187 "unit not supported\n"); 1850 "Shared access of read-write unit "
3188 zfcp_erp_unit_failed(unit, 36, fsf_req); 1851 "0x%016Lx on port 0x%016Lx not "
3189 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1852 "supported, disabling unit.\n",
3190 zfcp_erp_unit_shutdown(unit, 0, 81, fsf_req); 1853 unit->fcp_lun, unit->port->wwpn);
1854 zfcp_erp_unit_failed(unit, 36, req);
1855 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1856 zfcp_erp_unit_shutdown(unit, 0, 81, req);
3191 } 1857 }
3192 } 1858 }
3193
3194 retval = 0;
3195 break;
3196
3197 default:
3198 ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
3199 "(debug info 0x%x)\n",
3200 header->fsf_status);
3201 break; 1859 break;
3202 } 1860 }
3203 1861
3204 skip_fsfstatus: 1862skip_fsfstatus:
3205 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &unit->status); 1863 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &unit->status);
3206 return retval;
3207} 1864}
3208 1865
3209/* 1866/**
3210 * function: zfcp_fsf_close_unit 1867 * zfcp_fsf_open_unit - open unit
3211 * 1868 * @erp_action: pointer to struct zfcp_erp_action
3212 * purpose: 1869 * Returns: 0 on success, error otherwise
3213 *
3214 * returns: address of fsf_req - request successfully initiated
3215 * NULL -
3216 *
3217 * assumptions: This routine does not check whether the associated
3218 * remote port/lun has already been opened. This should be
3219 * done by calling routines. Otherwise some status
3220 * may be presented by FSF
3221 */ 1870 */
3222int 1871int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
3223zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3224{ 1872{
3225 volatile struct qdio_buffer_element *sbale; 1873 volatile struct qdio_buffer_element *sbale;
3226 struct zfcp_fsf_req *fsf_req; 1874 struct zfcp_adapter *adapter = erp_action->adapter;
3227 unsigned long lock_flags; 1875 struct zfcp_fsf_req *req;
3228 int retval = 0; 1876 int retval = -EIO;
3229 1877
3230 /* setup new FSF request */ 1878 spin_lock(&adapter->req_q.lock);
3231 retval = zfcp_fsf_req_create(erp_action->adapter, 1879 if (zfcp_fsf_req_sbal_get(adapter))
3232 FSF_QTCB_CLOSE_LUN, 1880 goto out;
3233 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 1881
3234 erp_action->adapter->pool.fsf_req_erp, 1882 req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN,
3235 &lock_flags, &fsf_req); 1883 ZFCP_REQ_AUTO_CLEANUP,
3236 if (retval < 0) { 1884 adapter->pool.fsf_req_erp);
3237 ZFCP_LOG_INFO("error: Could not create close unit request for " 1885 if (unlikely(IS_ERR(req))) {
3238 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n", 1886 retval = PTR_ERR(req);
3239 erp_action->unit->fcp_lun,
3240 erp_action->port->wwpn,
3241 zfcp_get_busid_by_adapter(erp_action->adapter));
3242 goto out; 1887 goto out;
3243 } 1888 }
3244 1889
3245 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1890 sbale = zfcp_qdio_sbale_req(req);
3246 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1891 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
3247 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1892 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
3248 1893
3249 fsf_req->qtcb->header.port_handle = erp_action->port->handle; 1894 req->qtcb->header.port_handle = erp_action->port->handle;
3250 fsf_req->qtcb->header.lun_handle = erp_action->unit->handle; 1895 req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
3251 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status); 1896 req->handler = zfcp_fsf_open_unit_handler;
3252 fsf_req->data = (unsigned long) erp_action->unit; 1897 req->data = erp_action->unit;
3253 fsf_req->erp_action = erp_action; 1898 req->erp_action = erp_action;
3254 erp_action->fsf_req = fsf_req; 1899 erp_action->fsf_req = req;
3255 1900
3256 zfcp_erp_start_timer(fsf_req); 1901 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
3257 retval = zfcp_fsf_req_send(erp_action->fsf_req); 1902 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1903
1904 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
1905
1906 zfcp_fsf_start_erp_timer(req);
1907 retval = zfcp_fsf_req_send(req);
3258 if (retval) { 1908 if (retval) {
3259 ZFCP_LOG_INFO("error: Could not send a close unit request for " 1909 zfcp_fsf_req_free(req);
3260 "unit 0x%016Lx on port 0x%016Lx onadapter %s.\n",
3261 erp_action->unit->fcp_lun,
3262 erp_action->port->wwpn,
3263 zfcp_get_busid_by_adapter(erp_action->adapter));
3264 zfcp_fsf_req_free(fsf_req);
3265 erp_action->fsf_req = NULL; 1910 erp_action->fsf_req = NULL;
3266 goto out;
3267 } 1911 }
3268 1912out:
3269 ZFCP_LOG_TRACE("Close LUN request initiated (adapter %s, " 1913 spin_unlock(&adapter->req_q.lock);
3270 "port 0x%016Lx, unit 0x%016Lx)\n",
3271 zfcp_get_busid_by_adapter(erp_action->adapter),
3272 erp_action->port->wwpn, erp_action->unit->fcp_lun);
3273 out:
3274 write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
3275 lock_flags);
3276 return retval; 1914 return retval;
3277} 1915}
3278 1916
3279/* 1917static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
3280 * function: zfcp_fsf_close_unit_handler
3281 *
3282 * purpose: is called for finished Close LUN FSF command
3283 *
3284 * returns:
3285 */
3286static int
3287zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
3288{ 1918{
3289 int retval = -EINVAL; 1919 struct zfcp_unit *unit = req->data;
3290 struct zfcp_unit *unit;
3291
3292 unit = (struct zfcp_unit *) fsf_req->data;
3293 1920
3294 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 1921 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
3295 /* don't change unit status in our bookkeeping */
3296 goto skip_fsfstatus; 1922 goto skip_fsfstatus;
3297 }
3298
3299 /* evaluate FSF status in QTCB */
3300 switch (fsf_req->qtcb->header.fsf_status) {
3301 1923
1924 switch (req->qtcb->header.fsf_status) {
3302 case FSF_PORT_HANDLE_NOT_VALID: 1925 case FSF_PORT_HANDLE_NOT_VALID:
3303 ZFCP_LOG_INFO("Temporary port identifier 0x%x for port " 1926 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, req);
3304 "0x%016Lx on adapter %s invalid. This may " 1927 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3305 "happen in rare circumstances\n",
3306 unit->port->handle,
3307 unit->port->wwpn,
3308 zfcp_get_busid_by_unit(unit));
3309 ZFCP_LOG_DEBUG("status qualifier:\n");
3310 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3311 (char *) &fsf_req->qtcb->header.fsf_status_qual,
3312 sizeof (union fsf_status_qual));
3313 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, fsf_req);
3314 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3315 break; 1928 break;
3316
3317 case FSF_LUN_HANDLE_NOT_VALID: 1929 case FSF_LUN_HANDLE_NOT_VALID:
3318 ZFCP_LOG_INFO("Temporary LUN identifier 0x%x of unit " 1930 zfcp_erp_port_reopen(unit->port, 0, 111, req);
3319 "0x%016Lx on port 0x%016Lx on adapter %s is " 1931 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3320 "invalid. This may happen occasionally.\n",
3321 unit->handle,
3322 unit->fcp_lun,
3323 unit->port->wwpn,
3324 zfcp_get_busid_by_unit(unit));
3325 ZFCP_LOG_DEBUG("Status qualifier data:\n");
3326 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3327 (char *) &fsf_req->qtcb->header.fsf_status_qual,
3328 sizeof (union fsf_status_qual));
3329 zfcp_erp_port_reopen(unit->port, 0, 111, fsf_req);
3330 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3331 break; 1932 break;
3332
3333 case FSF_PORT_BOXED: 1933 case FSF_PORT_BOXED:
3334 ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s " 1934 zfcp_erp_port_boxed(unit->port, 52, req);
3335 "needs to be reopened\n", 1935 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
3336 unit->port->wwpn, 1936 ZFCP_STATUS_FSFREQ_RETRY;
3337 zfcp_get_busid_by_unit(unit));
3338 zfcp_erp_port_boxed(unit->port, 52, fsf_req);
3339 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
3340 ZFCP_STATUS_FSFREQ_RETRY;
3341 break; 1937 break;
3342
3343 case FSF_ADAPTER_STATUS_AVAILABLE: 1938 case FSF_ADAPTER_STATUS_AVAILABLE:
3344 switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) { 1939 switch (req->qtcb->header.fsf_status_qual.word[0]) {
3345 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1940 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
3346 /* re-establish link to port */
3347 zfcp_test_link(unit->port); 1941 zfcp_test_link(unit->port);
3348 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1942 /* fall through */
3349 break;
3350 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1943 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
3351 /* ERP strategy will escalate */ 1944 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3352 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3353 break;
3354 default:
3355 ZFCP_LOG_NORMAL
3356 ("bug: Wrong status qualifier 0x%x arrived.\n",
3357 fsf_req->qtcb->header.fsf_status_qual.word[0]);
3358 break; 1945 break;
3359 } 1946 }
3360 break; 1947 break;
3361
3362 case FSF_GOOD: 1948 case FSF_GOOD:
3363 ZFCP_LOG_TRACE("unit 0x%016Lx on port 0x%016Lx on adapter %s "
3364 "closed, port handle 0x%x\n",
3365 unit->fcp_lun,
3366 unit->port->wwpn,
3367 zfcp_get_busid_by_unit(unit),
3368 unit->handle);
3369 /* mark unit as closed */
3370 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 1949 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
3371 retval = 0;
3372 break;
3373
3374 default:
3375 ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
3376 "(debug info 0x%x)\n",
3377 fsf_req->qtcb->header.fsf_status);
3378 break; 1950 break;
3379 } 1951 }
3380 1952skip_fsfstatus:
3381 skip_fsfstatus:
3382 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &unit->status); 1953 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &unit->status);
3383 return retval;
3384} 1954}
3385 1955
3386/** 1956/**
3387 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) 1957 * zfcp_fsf_close_unit - close zfcp unit
3388 * @adapter: adapter where scsi command is issued 1958 * @erp_action: pointer to struct zfcp_unit
3389 * @unit: unit where command is sent to 1959 * Returns: 0 on success, error otherwise
3390 * @scsi_cmnd: scsi command to be sent
3391 * @timer: timer to be started when request is initiated
3392 * @req_flags: flags for fsf_request
3393 */ 1960 */
3394int 1961int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3395zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3396 struct zfcp_unit *unit,
3397 struct scsi_cmnd * scsi_cmnd,
3398 int use_timer, int req_flags)
3399{ 1962{
3400 struct zfcp_fsf_req *fsf_req = NULL; 1963 volatile struct qdio_buffer_element *sbale;
3401 struct fcp_cmnd_iu *fcp_cmnd_iu; 1964 struct zfcp_adapter *adapter = erp_action->adapter;
3402 unsigned int sbtype; 1965 struct zfcp_fsf_req *req;
3403 unsigned long lock_flags; 1966 int retval = -EIO;
3404 int real_bytes = 0;
3405 int retval = 0;
3406 int mask;
3407
3408 /* setup new FSF request */
3409 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
3410 adapter->pool.fsf_req_scsi,
3411 &lock_flags, &fsf_req);
3412 if (unlikely(retval < 0)) {
3413 ZFCP_LOG_DEBUG("error: Could not create FCP command request "
3414 "for unit 0x%016Lx on port 0x%016Lx on "
3415 "adapter %s\n",
3416 unit->fcp_lun,
3417 unit->port->wwpn,
3418 zfcp_get_busid_by_adapter(adapter));
3419 goto failed_req_create;
3420 }
3421
3422 if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
3423 &unit->status))) {
3424 retval = -EBUSY;
3425 goto unit_blocked;
3426 }
3427
3428 zfcp_unit_get(unit);
3429 fsf_req->unit = unit;
3430
3431 /* associate FSF request with SCSI request (for look up on abort) */
3432 scsi_cmnd->host_scribble = (unsigned char *) fsf_req->req_id;
3433
3434 /* associate SCSI command with FSF request */
3435 fsf_req->data = (unsigned long) scsi_cmnd;
3436
3437 /* set handles of unit and its parent port in QTCB */
3438 fsf_req->qtcb->header.lun_handle = unit->handle;
3439 fsf_req->qtcb->header.port_handle = unit->port->handle;
3440
3441 /* FSF does not define the structure of the FCP_CMND IU */
3442 fcp_cmnd_iu = (struct fcp_cmnd_iu *)
3443 &(fsf_req->qtcb->bottom.io.fcp_cmnd);
3444
3445 /*
3446 * set depending on data direction:
3447 * data direction bits in SBALE (SB Type)
3448 * data direction bits in QTCB
3449 * data direction bits in FCP_CMND IU
3450 */
3451 switch (scsi_cmnd->sc_data_direction) {
3452 case DMA_NONE:
3453 fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
3454 /*
3455 * FIXME(qdio):
3456 * what is the correct type for commands
3457 * without 'real' data buffers?
3458 */
3459 sbtype = SBAL_FLAGS0_TYPE_READ;
3460 break;
3461 case DMA_FROM_DEVICE:
3462 fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
3463 sbtype = SBAL_FLAGS0_TYPE_READ;
3464 fcp_cmnd_iu->rddata = 1;
3465 break;
3466 case DMA_TO_DEVICE:
3467 fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
3468 sbtype = SBAL_FLAGS0_TYPE_WRITE;
3469 fcp_cmnd_iu->wddata = 1;
3470 break;
3471 case DMA_BIDIRECTIONAL:
3472 default:
3473 /*
3474 * dummy, catch this condition earlier
3475 * in zfcp_scsi_queuecommand
3476 */
3477 goto failed_scsi_cmnd;
3478 }
3479
3480 /* set FC service class in QTCB (3 per default) */
3481 fsf_req->qtcb->bottom.io.service_class = ZFCP_FC_SERVICE_CLASS_DEFAULT;
3482
3483 /* set FCP_LUN in FCP_CMND IU in QTCB */
3484 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
3485
3486 mask = ZFCP_STATUS_UNIT_READONLY | ZFCP_STATUS_UNIT_SHARED;
3487
3488 /* set task attributes in FCP_CMND IU in QTCB */
3489 if (likely((scsi_cmnd->device->simple_tags) ||
3490 (atomic_test_mask(mask, &unit->status))))
3491 fcp_cmnd_iu->task_attribute = SIMPLE_Q;
3492 else
3493 fcp_cmnd_iu->task_attribute = UNTAGGED;
3494
3495 /* set additional length of FCP_CDB in FCP_CMND IU in QTCB, if needed */
3496 if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH)) {
3497 fcp_cmnd_iu->add_fcp_cdb_length
3498 = (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
3499 ZFCP_LOG_TRACE("SCSI CDB length is 0x%x, "
3500 "additional FCP_CDB length is 0x%x "
3501 "(shifted right 2 bits)\n",
3502 scsi_cmnd->cmd_len,
3503 fcp_cmnd_iu->add_fcp_cdb_length);
3504 }
3505 /*
3506 * copy SCSI CDB (including additional length, if any) to
3507 * FCP_CDB in FCP_CMND IU in QTCB
3508 */
3509 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
3510
3511 /* FCP CMND IU length in QTCB */
3512 fsf_req->qtcb->bottom.io.fcp_cmnd_length =
3513 sizeof (struct fcp_cmnd_iu) +
3514 fcp_cmnd_iu->add_fcp_cdb_length + sizeof (fcp_dl_t);
3515 1967
3516 /* generate SBALEs from data buffer */ 1968 spin_lock(&adapter->req_q.lock);
3517 real_bytes = zfcp_qdio_sbals_from_scsicmnd(fsf_req, sbtype, scsi_cmnd); 1969 if (zfcp_fsf_req_sbal_get(adapter))
3518 if (unlikely(real_bytes < 0)) { 1970 goto out;
3519 if (fsf_req->sbal_number < ZFCP_MAX_SBALS_PER_REQ) { 1971 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN,
3520 ZFCP_LOG_DEBUG( 1972 ZFCP_REQ_AUTO_CLEANUP,
3521 "Data did not fit into available buffer(s), " 1973 adapter->pool.fsf_req_erp);
3522 "waiting for more...\n"); 1974 if (unlikely(IS_ERR(req))) {
3523 retval = -EIO; 1975 retval = PTR_ERR(req);
3524 } else { 1976 goto out;
3525 ZFCP_LOG_NORMAL("error: No truncation implemented but "
3526 "required. Shutting down unit "
3527 "(adapter %s, port 0x%016Lx, "
3528 "unit 0x%016Lx)\n",
3529 zfcp_get_busid_by_unit(unit),
3530 unit->port->wwpn,
3531 unit->fcp_lun);
3532 zfcp_erp_unit_shutdown(unit, 0, 131, fsf_req);
3533 retval = -EINVAL;
3534 }
3535 goto no_fit;
3536 } 1977 }
3537 1978
3538 /* set length of FCP data length in FCP_CMND IU in QTCB */ 1979 sbale = zfcp_qdio_sbale_req(req);
3539 zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes); 1980 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1981 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
3540 1982
3541 ZFCP_LOG_DEBUG("Sending SCSI command:\n"); 1983 req->qtcb->header.port_handle = erp_action->port->handle;
3542 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, 1984 req->qtcb->header.lun_handle = erp_action->unit->handle;
3543 (char *) scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 1985 req->handler = zfcp_fsf_close_unit_handler;
1986 req->data = erp_action->unit;
1987 req->erp_action = erp_action;
1988 erp_action->fsf_req = req;
1989 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
3544 1990
3545 if (use_timer) 1991 zfcp_fsf_start_erp_timer(req);
3546 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT); 1992 retval = zfcp_fsf_req_send(req);
3547 1993 if (retval) {
3548 retval = zfcp_fsf_req_send(fsf_req); 1994 zfcp_fsf_req_free(req);
3549 if (unlikely(retval < 0)) { 1995 erp_action->fsf_req = NULL;
3550 ZFCP_LOG_INFO("error: Could not send FCP command request "
3551 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
3552 zfcp_get_busid_by_adapter(adapter),
3553 unit->port->wwpn,
3554 unit->fcp_lun);
3555 goto send_failed;
3556 } 1996 }
3557 1997out:
3558 ZFCP_LOG_TRACE("Send FCP Command initiated (adapter %s, " 1998 spin_unlock(&adapter->req_q.lock);
3559 "port 0x%016Lx, unit 0x%016Lx)\n",
3560 zfcp_get_busid_by_adapter(adapter),
3561 unit->port->wwpn,
3562 unit->fcp_lun);
3563 goto success;
3564
3565 send_failed:
3566 no_fit:
3567 failed_scsi_cmnd:
3568 zfcp_unit_put(unit);
3569 unit_blocked:
3570 zfcp_fsf_req_free(fsf_req);
3571 fsf_req = NULL;
3572 scsi_cmnd->host_scribble = NULL;
3573 success:
3574 failed_req_create:
3575 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
3576 return retval; 1999 return retval;
3577} 2000}
3578 2001
3579struct zfcp_fsf_req * 2002static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
3580zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
3581 struct zfcp_unit *unit,
3582 u8 tm_flags, int req_flags)
3583{ 2003{
3584 struct zfcp_fsf_req *fsf_req = NULL; 2004 lat_rec->sum += lat;
3585 int retval = 0; 2005 lat_rec->min = min(lat_rec->min, lat);
3586 struct fcp_cmnd_iu *fcp_cmnd_iu; 2006 lat_rec->max = max(lat_rec->max, lat);
3587 unsigned long lock_flags;
3588 volatile struct qdio_buffer_element *sbale;
3589
3590 /* setup new FSF request */
3591 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
3592 adapter->pool.fsf_req_scsi,
3593 &lock_flags, &fsf_req);
3594 if (retval < 0) {
3595 ZFCP_LOG_INFO("error: Could not create FCP command (task "
3596 "management) request for adapter %s, port "
3597 " 0x%016Lx, unit 0x%016Lx.\n",
3598 zfcp_get_busid_by_adapter(adapter),
3599 unit->port->wwpn, unit->fcp_lun);
3600 goto out;
3601 }
3602
3603 if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
3604 &unit->status)))
3605 goto unit_blocked;
3606
3607 /*
3608 * Used to decide on proper handler in the return path,
3609 * could be either zfcp_fsf_send_fcp_command_task_handler or
3610 * zfcp_fsf_send_fcp_command_task_management_handler */
3611
3612 fsf_req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
3613
3614 /*
3615 * hold a pointer to the unit being target of this
3616 * task management request
3617 */
3618 fsf_req->data = (unsigned long) unit;
3619
3620 /* set FSF related fields in QTCB */
3621 fsf_req->qtcb->header.lun_handle = unit->handle;
3622 fsf_req->qtcb->header.port_handle = unit->port->handle;
3623 fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
3624 fsf_req->qtcb->bottom.io.service_class = ZFCP_FC_SERVICE_CLASS_DEFAULT;
3625 fsf_req->qtcb->bottom.io.fcp_cmnd_length =
3626 sizeof (struct fcp_cmnd_iu) + sizeof (fcp_dl_t);
3627
3628 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
3629 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
3630 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
3631
3632 /* set FCP related fields in FCP_CMND IU in QTCB */
3633 fcp_cmnd_iu = (struct fcp_cmnd_iu *)
3634 &(fsf_req->qtcb->bottom.io.fcp_cmnd);
3635 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
3636 fcp_cmnd_iu->task_management_flags = tm_flags;
3637
3638 zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
3639 retval = zfcp_fsf_req_send(fsf_req);
3640 if (!retval)
3641 goto out;
3642
3643 unit_blocked:
3644 zfcp_fsf_req_free(fsf_req);
3645 fsf_req = NULL;
3646
3647 out:
3648 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
3649 return fsf_req;
3650} 2007}
3651 2008
3652/* 2009static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
3653 * function: zfcp_fsf_send_fcp_command_handler
3654 *
3655 * purpose: is called for finished Send FCP Command
3656 *
3657 * returns:
3658 */
3659static int
3660zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3661{ 2010{
3662 int retval = -EINVAL; 2011 struct fsf_qual_latency_info *lat_inf;
3663 struct zfcp_unit *unit; 2012 struct latency_cont *lat;
3664 struct fsf_qtcb_header *header; 2013 struct zfcp_unit *unit = req->unit;
3665 u16 subtable, rule, counter; 2014 unsigned long flags;
3666
3667 header = &fsf_req->qtcb->header;
3668
3669 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
3670 unit = (struct zfcp_unit *) fsf_req->data;
3671 else
3672 unit = fsf_req->unit;
3673
3674 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
3675 /* go directly to calls of special handlers */
3676 goto skip_fsfstatus;
3677 }
3678
3679 /* evaluate FSF status in QTCB */
3680 switch (header->fsf_status) {
3681
3682 case FSF_PORT_HANDLE_NOT_VALID:
3683 ZFCP_LOG_INFO("Temporary port identifier 0x%x for port "
3684 "0x%016Lx on adapter %s invalid\n",
3685 unit->port->handle,
3686 unit->port->wwpn, zfcp_get_busid_by_unit(unit));
3687 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3688 (char *) &header->fsf_status_qual,
3689 sizeof (union fsf_status_qual));
3690 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, fsf_req);
3691 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3692 break;
3693
3694 case FSF_LUN_HANDLE_NOT_VALID:
3695 ZFCP_LOG_INFO("Temporary LUN identifier 0x%x for unit "
3696 "0x%016Lx on port 0x%016Lx on adapter %s is "
3697 "invalid. This may happen occasionally.\n",
3698 unit->handle,
3699 unit->fcp_lun,
3700 unit->port->wwpn,
3701 zfcp_get_busid_by_unit(unit));
3702 ZFCP_LOG_NORMAL("Status qualifier data:\n");
3703 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
3704 (char *) &header->fsf_status_qual,
3705 sizeof (union fsf_status_qual));
3706 zfcp_erp_port_reopen(unit->port, 0, 113, fsf_req);
3707 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3708 break;
3709
3710 case FSF_HANDLE_MISMATCH:
3711 ZFCP_LOG_NORMAL("bug: The port handle 0x%x has changed "
3712 "unexpectedly. (adapter %s, port 0x%016Lx, "
3713 "unit 0x%016Lx)\n",
3714 unit->port->handle,
3715 zfcp_get_busid_by_unit(unit),
3716 unit->port->wwpn,
3717 unit->fcp_lun);
3718 ZFCP_LOG_NORMAL("status qualifier:\n");
3719 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
3720 (char *) &header->fsf_status_qual,
3721 sizeof (union fsf_status_qual));
3722 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 114, fsf_req);
3723 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3724 break;
3725
3726 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
3727 ZFCP_LOG_INFO("error: adapter %s does not support fc "
3728 "class %d.\n",
3729 zfcp_get_busid_by_unit(unit),
3730 ZFCP_FC_SERVICE_CLASS_DEFAULT);
3731 /* stop operation for this adapter */
3732 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 132, fsf_req);
3733 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3734 break;
3735
3736 case FSF_FCPLUN_NOT_VALID:
3737 ZFCP_LOG_NORMAL("bug: unit 0x%016Lx on port 0x%016Lx on "
3738 "adapter %s does not have correct unit "
3739 "handle 0x%x\n",
3740 unit->fcp_lun,
3741 unit->port->wwpn,
3742 zfcp_get_busid_by_unit(unit),
3743 unit->handle);
3744 ZFCP_LOG_DEBUG("status qualifier:\n");
3745 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3746 (char *) &header->fsf_status_qual,
3747 sizeof (union fsf_status_qual));
3748 zfcp_erp_port_reopen(unit->port, 0, 115, fsf_req);
3749 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3750 break;
3751
3752 case FSF_ACCESS_DENIED:
3753 ZFCP_LOG_NORMAL("Access denied, cannot send FCP command to "
3754 "unit 0x%016Lx on port 0x%016Lx on "
3755 "adapter %s\n", unit->fcp_lun, unit->port->wwpn,
3756 zfcp_get_busid_by_unit(unit));
3757 for (counter = 0; counter < 2; counter++) {
3758 subtable = header->fsf_status_qual.halfword[counter * 2];
3759 rule = header->fsf_status_qual.halfword[counter * 2 + 1];
3760 switch (subtable) {
3761 case FSF_SQ_CFDC_SUBTABLE_OS:
3762 case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
3763 case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
3764 case FSF_SQ_CFDC_SUBTABLE_LUN:
3765 ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
3766 zfcp_act_subtable_type[subtable], rule);
3767 break;
3768 }
3769 }
3770 zfcp_erp_unit_access_denied(unit, 61, fsf_req);
3771 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3772 break;
3773
3774 case FSF_DIRECTION_INDICATOR_NOT_VALID:
3775 ZFCP_LOG_INFO("bug: Invalid data direction given for unit "
3776 "0x%016Lx on port 0x%016Lx on adapter %s "
3777 "(debug info %d)\n",
3778 unit->fcp_lun,
3779 unit->port->wwpn,
3780 zfcp_get_busid_by_unit(unit),
3781 fsf_req->qtcb->bottom.io.data_direction);
3782 /* stop operation for this adapter */
3783 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, fsf_req);
3784 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3785 break;
3786
3787 case FSF_CMND_LENGTH_NOT_VALID:
3788 ZFCP_LOG_NORMAL
3789 ("bug: An invalid control-data-block length field "
3790 "was found in a command for unit 0x%016Lx on port "
3791 "0x%016Lx on adapter %s " "(debug info %d)\n",
3792 unit->fcp_lun, unit->port->wwpn,
3793 zfcp_get_busid_by_unit(unit),
3794 fsf_req->qtcb->bottom.io.fcp_cmnd_length);
3795 /* stop operation for this adapter */
3796 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, fsf_req);
3797 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3798 break;
3799 2015
3800 case FSF_PORT_BOXED: 2016 lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info;
3801 ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s "
3802 "needs to be reopened\n",
3803 unit->port->wwpn, zfcp_get_busid_by_unit(unit));
3804 zfcp_erp_port_boxed(unit->port, 53, fsf_req);
3805 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
3806 ZFCP_STATUS_FSFREQ_RETRY;
3807 break;
3808 2017
3809 case FSF_LUN_BOXED: 2018 switch (req->qtcb->bottom.io.data_direction) {
3810 ZFCP_LOG_NORMAL("unit needs to be reopened (adapter %s, " 2019 case FSF_DATADIR_READ:
3811 "wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n", 2020 lat = &unit->latencies.read;
3812 zfcp_get_busid_by_unit(unit),
3813 unit->port->wwpn, unit->fcp_lun);
3814 zfcp_erp_unit_boxed(unit, 54, fsf_req);
3815 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
3816 | ZFCP_STATUS_FSFREQ_RETRY;
3817 break;
3818
3819 case FSF_ADAPTER_STATUS_AVAILABLE:
3820 switch (header->fsf_status_qual.word[0]) {
3821 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
3822 /* re-establish link to port */
3823 zfcp_test_link(unit->port);
3824 break;
3825 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
3826 /* FIXME(hw) need proper specs for proper action */
3827 /* let scsi stack deal with retries and escalation */
3828 break;
3829 default:
3830 ZFCP_LOG_NORMAL
3831 ("Unknown status qualifier 0x%x arrived.\n",
3832 header->fsf_status_qual.word[0]);
3833 break;
3834 }
3835 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3836 break; 2021 break;
3837 2022 case FSF_DATADIR_WRITE:
3838 case FSF_GOOD: 2023 lat = &unit->latencies.write;
3839 break; 2024 break;
3840 2025 case FSF_DATADIR_CMND:
3841 case FSF_FCP_RSP_AVAILABLE: 2026 lat = &unit->latencies.cmd;
3842 break; 2027 break;
2028 default:
2029 return;
3843 } 2030 }
3844 2031
3845 skip_fsfstatus: 2032 spin_lock_irqsave(&unit->latencies.lock, flags);
3846 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) { 2033 zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
3847 retval = 2034 zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
3848 zfcp_fsf_send_fcp_command_task_management_handler(fsf_req); 2035 lat->counter++;
3849 } else { 2036 spin_unlock_irqrestore(&unit->latencies.lock, flags);
3850 retval = zfcp_fsf_send_fcp_command_task_handler(fsf_req);
3851 fsf_req->unit = NULL;
3852 zfcp_unit_put(unit);
3853 }
3854 return retval;
3855} 2037}
3856 2038
3857/* 2039static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
3858 * function: zfcp_fsf_send_fcp_command_task_handler
3859 *
3860 * purpose: evaluates FCP_RSP IU
3861 *
3862 * returns:
3863 */
3864static int
3865zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
3866{ 2040{
3867 int retval = 0; 2041 struct scsi_cmnd *scpnt = req->data;
3868 struct scsi_cmnd *scpnt;
3869 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) 2042 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
3870 &(fsf_req->qtcb->bottom.io.fcp_rsp); 2043 &(req->qtcb->bottom.io.fcp_rsp);
3871 struct fcp_cmnd_iu *fcp_cmnd_iu = (struct fcp_cmnd_iu *)
3872 &(fsf_req->qtcb->bottom.io.fcp_cmnd);
3873 u32 sns_len; 2044 u32 sns_len;
3874 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu); 2045 char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
3875 unsigned long flags; 2046 unsigned long flags;
3876 struct zfcp_unit *unit = fsf_req->unit; 2047
3877 2048 if (unlikely(!scpnt))
3878 read_lock_irqsave(&fsf_req->adapter->abort_lock, flags); 2049 return;
3879 scpnt = (struct scsi_cmnd *) fsf_req->data; 2050
3880 if (unlikely(!scpnt)) { 2051 read_lock_irqsave(&req->adapter->abort_lock, flags);
3881 ZFCP_LOG_DEBUG 2052
3882 ("Command with fsf_req %p is not associated to " 2053 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
3883 "a scsi command anymore. Aborted?\n", fsf_req); 2054 set_host_byte(scpnt, DID_SOFT_ERROR);
3884 goto out; 2055 set_driver_byte(scpnt, SUGGEST_RETRY);
3885 }
3886 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
3887 /* FIXME: (design) mid-layer should handle DID_ABORT like
3888 * DID_SOFT_ERROR by retrying the request for devices
3889 * that allow retries.
3890 */
3891 ZFCP_LOG_DEBUG("Setting DID_SOFT_ERROR and SUGGEST_RETRY\n");
3892 set_host_byte(&scpnt->result, DID_SOFT_ERROR);
3893 set_driver_byte(&scpnt->result, SUGGEST_RETRY);
3894 goto skip_fsfstatus; 2056 goto skip_fsfstatus;
3895 } 2057 }
3896 2058
3897 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 2059 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
3898 ZFCP_LOG_DEBUG("Setting DID_ERROR\n"); 2060 set_host_byte(scpnt, DID_ERROR);
3899 set_host_byte(&scpnt->result, DID_ERROR);
3900 goto skip_fsfstatus; 2061 goto skip_fsfstatus;
3901 } 2062 }
3902 2063
3903 /* set message byte of result in SCSI command */ 2064 set_msg_byte(scpnt, COMMAND_COMPLETE);
3904 scpnt->result |= COMMAND_COMPLETE << 8;
3905 2065
3906 /*
3907 * copy SCSI status code of FCP_STATUS of FCP_RSP IU to status byte
3908 * of result in SCSI command
3909 */
3910 scpnt->result |= fcp_rsp_iu->scsi_status; 2066 scpnt->result |= fcp_rsp_iu->scsi_status;
3911 if (unlikely(fcp_rsp_iu->scsi_status)) {
3912 /* DEBUG */
3913 ZFCP_LOG_DEBUG("status for SCSI Command:\n");
3914 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3915 scpnt->cmnd, scpnt->cmd_len);
3916 ZFCP_LOG_DEBUG("SCSI status code 0x%x\n",
3917 fcp_rsp_iu->scsi_status);
3918 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3919 (void *) fcp_rsp_iu, sizeof (struct fcp_rsp_iu));
3920 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3921 zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu),
3922 fcp_rsp_iu->fcp_sns_len);
3923 }
3924 2067
3925 /* check FCP_RSP_INFO */ 2068 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
2069 zfcp_fsf_req_latency(req);
2070
3926 if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) { 2071 if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
3927 ZFCP_LOG_DEBUG("rsp_len is valid\n"); 2072 if (fcp_rsp_info[3] == RSP_CODE_GOOD)
3928 switch (fcp_rsp_info[3]) { 2073 set_host_byte(scpnt, DID_OK);
3929 case RSP_CODE_GOOD: 2074 else {
3930 /* ok, continue */ 2075 set_host_byte(scpnt, DID_ERROR);
3931 ZFCP_LOG_TRACE("no failure or Task Management "
3932 "Function complete\n");
3933 set_host_byte(&scpnt->result, DID_OK);
3934 break;
3935 case RSP_CODE_LENGTH_MISMATCH:
3936 /* hardware bug */
3937 ZFCP_LOG_NORMAL("bug: FCP response code indictates "
3938 "that the fibrechannel protocol data "
3939 "length differs from the burst length. "
3940 "The problem occured on unit 0x%016Lx "
3941 "on port 0x%016Lx on adapter %s",
3942 unit->fcp_lun,
3943 unit->port->wwpn,
3944 zfcp_get_busid_by_unit(unit));
3945 /* dump SCSI CDB as prepared by zfcp */
3946 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3947 (char *) &fsf_req->qtcb->
3948 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
3949 set_host_byte(&scpnt->result, DID_ERROR);
3950 goto skip_fsfstatus;
3951 case RSP_CODE_FIELD_INVALID:
3952 /* driver or hardware bug */
3953 ZFCP_LOG_NORMAL("bug: FCP response code indictates "
3954 "that the fibrechannel protocol data "
3955 "fields were incorrectly set up. "
3956 "The problem occured on the unit "
3957 "0x%016Lx on port 0x%016Lx on "
3958 "adapter %s",
3959 unit->fcp_lun,
3960 unit->port->wwpn,
3961 zfcp_get_busid_by_unit(unit));
3962 /* dump SCSI CDB as prepared by zfcp */
3963 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3964 (char *) &fsf_req->qtcb->
3965 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
3966 set_host_byte(&scpnt->result, DID_ERROR);
3967 goto skip_fsfstatus;
3968 case RSP_CODE_RO_MISMATCH:
3969 /* hardware bug */
3970 ZFCP_LOG_NORMAL("bug: The FCP response code indicates "
3971 "that conflicting values for the "
3972 "fibrechannel payload offset from the "
3973 "header were found. "
3974 "The problem occured on unit 0x%016Lx "
3975 "on port 0x%016Lx on adapter %s.\n",
3976 unit->fcp_lun,
3977 unit->port->wwpn,
3978 zfcp_get_busid_by_unit(unit));
3979 /* dump SCSI CDB as prepared by zfcp */
3980 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3981 (char *) &fsf_req->qtcb->
3982 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
3983 set_host_byte(&scpnt->result, DID_ERROR);
3984 goto skip_fsfstatus;
3985 default:
3986 ZFCP_LOG_NORMAL("bug: An invalid FCP response "
3987 "code was detected for a command. "
3988 "The problem occured on the unit "
3989 "0x%016Lx on port 0x%016Lx on "
3990 "adapter %s (debug info 0x%x)\n",
3991 unit->fcp_lun,
3992 unit->port->wwpn,
3993 zfcp_get_busid_by_unit(unit),
3994 fcp_rsp_info[3]);
3995 /* dump SCSI CDB as prepared by zfcp */
3996 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3997 (char *) &fsf_req->qtcb->
3998 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
3999 set_host_byte(&scpnt->result, DID_ERROR);
4000 goto skip_fsfstatus; 2076 goto skip_fsfstatus;
4001 } 2077 }
4002 } 2078 }
4003 2079
4004 /* check for sense data */
4005 if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) { 2080 if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
4006 sns_len = FSF_FCP_RSP_SIZE - 2081 sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
4007 sizeof (struct fcp_rsp_iu) + fcp_rsp_iu->fcp_rsp_len; 2082 fcp_rsp_iu->fcp_rsp_len;
4008 ZFCP_LOG_TRACE("room for %i bytes sense data in QTCB\n",
4009 sns_len);
4010 sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE); 2083 sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
4011 ZFCP_LOG_TRACE("room for %i bytes sense data in SCSI command\n",
4012 SCSI_SENSE_BUFFERSIZE);
4013 sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len); 2084 sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
4014 ZFCP_LOG_TRACE("scpnt->result =0x%x, command was:\n",
4015 scpnt->result);
4016 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
4017 scpnt->cmnd, scpnt->cmd_len);
4018 2085
4019 ZFCP_LOG_TRACE("%i bytes sense data provided by FCP\n",
4020 fcp_rsp_iu->fcp_sns_len);
4021 memcpy(scpnt->sense_buffer, 2086 memcpy(scpnt->sense_buffer,
4022 zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len); 2087 zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
4023 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
4024 (void *)scpnt->sense_buffer, sns_len);
4025 }
4026
4027 /* check for overrun */
4028 if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_over)) {
4029 ZFCP_LOG_INFO("A data overrun was detected for a command. "
4030 "unit 0x%016Lx, port 0x%016Lx, adapter %s. "
4031 "The response data length is "
4032 "%d, the original length was %d.\n",
4033 unit->fcp_lun,
4034 unit->port->wwpn,
4035 zfcp_get_busid_by_unit(unit),
4036 fcp_rsp_iu->fcp_resid,
4037 (int) zfcp_get_fcp_dl(fcp_cmnd_iu));
4038 } 2088 }
4039 2089
4040 /* check for underrun */
4041 if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) { 2090 if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
4042 ZFCP_LOG_INFO("A data underrun was detected for a command. "
4043 "unit 0x%016Lx, port 0x%016Lx, adapter %s. "
4044 "The response data length is "
4045 "%d, the original length was %d.\n",
4046 unit->fcp_lun,
4047 unit->port->wwpn,
4048 zfcp_get_busid_by_unit(unit),
4049 fcp_rsp_iu->fcp_resid,
4050 (int) zfcp_get_fcp_dl(fcp_cmnd_iu));
4051
4052 scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid); 2091 scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
4053 if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) < 2092 if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
4054 scpnt->underflow) 2093 scpnt->underflow)
4055 set_host_byte(&scpnt->result, DID_ERROR); 2094 set_host_byte(scpnt, DID_ERROR);
4056 } 2095 }
4057 2096skip_fsfstatus:
4058 skip_fsfstatus:
4059 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result);
4060
4061 if (scpnt->result != 0) 2097 if (scpnt->result != 0)
4062 zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt, fsf_req); 2098 zfcp_scsi_dbf_event_result("erro", 3, req->adapter, scpnt, req);
4063 else if (scpnt->retries > 0) 2099 else if (scpnt->retries > 0)
4064 zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt, fsf_req); 2100 zfcp_scsi_dbf_event_result("retr", 4, req->adapter, scpnt, req);
4065 else 2101 else
4066 zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt, fsf_req); 2102 zfcp_scsi_dbf_event_result("norm", 6, req->adapter, scpnt, req);
4067 2103
4068 /* cleanup pointer (need this especially for abort) */
4069 scpnt->host_scribble = NULL; 2104 scpnt->host_scribble = NULL;
4070
4071 /* always call back */
4072 (scpnt->scsi_done) (scpnt); 2105 (scpnt->scsi_done) (scpnt);
4073
4074 /* 2106 /*
4075 * We must hold this lock until scsi_done has been called. 2107 * We must hold this lock until scsi_done has been called.
4076 * Otherwise we may call scsi_done after abort regarding this 2108 * Otherwise we may call scsi_done after abort regarding this
4077 * command has completed. 2109 * command has completed.
4078 * Note: scsi_done must not block! 2110 * Note: scsi_done must not block!
4079 */ 2111 */
4080 out: 2112 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
4081 read_unlock_irqrestore(&fsf_req->adapter->abort_lock, flags);
4082 return retval;
4083} 2113}
4084 2114
4085/* 2115static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
4086 * function: zfcp_fsf_send_fcp_command_task_management_handler
4087 *
4088 * purpose: evaluates FCP_RSP IU
4089 *
4090 * returns:
4091 */
4092static int
4093zfcp_fsf_send_fcp_command_task_management_handler(struct zfcp_fsf_req *fsf_req)
4094{ 2116{
4095 int retval = 0;
4096 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) 2117 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
4097 &(fsf_req->qtcb->bottom.io.fcp_rsp); 2118 &(req->qtcb->bottom.io.fcp_rsp);
4098 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu); 2119 char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
4099 struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data;
4100
4101 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
4102 fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
4103 goto skip_fsfstatus;
4104 }
4105 2120
4106 /* check FCP_RSP_INFO */ 2121 if ((fcp_rsp_info[3] != RSP_CODE_GOOD) ||
4107 switch (fcp_rsp_info[3]) { 2122 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
4108 case RSP_CODE_GOOD: 2123 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
4109 /* ok, continue */
4110 ZFCP_LOG_DEBUG("no failure or Task Management "
4111 "Function complete\n");
4112 break;
4113 case RSP_CODE_TASKMAN_UNSUPP:
4114 ZFCP_LOG_NORMAL("bug: A reuested task management function "
4115 "is not supported on the target device "
4116 "unit 0x%016Lx, port 0x%016Lx, adapter %s\n ",
4117 unit->fcp_lun,
4118 unit->port->wwpn,
4119 zfcp_get_busid_by_unit(unit));
4120 fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP;
4121 break;
4122 case RSP_CODE_TASKMAN_FAILED:
4123 ZFCP_LOG_NORMAL("bug: A reuested task management function "
4124 "failed to complete successfully. "
4125 "unit 0x%016Lx, port 0x%016Lx, adapter %s.\n",
4126 unit->fcp_lun,
4127 unit->port->wwpn,
4128 zfcp_get_busid_by_unit(unit));
4129 fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
4130 break;
4131 default:
4132 ZFCP_LOG_NORMAL("bug: An invalid FCP response "
4133 "code was detected for a command. "
4134 "unit 0x%016Lx, port 0x%016Lx, adapter %s "
4135 "(debug info 0x%x)\n",
4136 unit->fcp_lun,
4137 unit->port->wwpn,
4138 zfcp_get_busid_by_unit(unit),
4139 fcp_rsp_info[3]);
4140 fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
4141 }
4142
4143 skip_fsfstatus:
4144 return retval;
4145} 2124}
4146 2125
4147 2126
4148/* 2127static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
4149 * function: zfcp_fsf_control_file
4150 *
4151 * purpose: Initiator of the control file upload/download FSF requests
4152 *
4153 * returns: 0 - FSF request is successfuly created and queued
4154 * -EOPNOTSUPP - The FCP adapter does not have Control File support
4155 * -EINVAL - Invalid direction specified
4156 * -ENOMEM - Insufficient memory
4157 * -EPERM - Cannot create FSF request or place it in QDIO queue
4158 */
4159int
4160zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4161 struct zfcp_fsf_req **fsf_req_ptr,
4162 u32 fsf_command,
4163 u32 option,
4164 struct zfcp_sg_list *sg_list)
4165{ 2128{
4166 struct zfcp_fsf_req *fsf_req; 2129 struct zfcp_unit *unit;
4167 struct fsf_qtcb_bottom_support *bottom; 2130 struct fsf_qtcb_header *header = &req->qtcb->header;
4168 volatile struct qdio_buffer_element *sbale;
4169 unsigned long lock_flags;
4170 int req_flags = 0;
4171 int direction;
4172 int retval = 0;
4173
4174 if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) {
4175 ZFCP_LOG_INFO("cfdc not supported (adapter %s)\n",
4176 zfcp_get_busid_by_adapter(adapter));
4177 retval = -EOPNOTSUPP;
4178 goto out;
4179 }
4180
4181 switch (fsf_command) {
4182
4183 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
4184 direction = SBAL_FLAGS0_TYPE_WRITE;
4185 if ((option != FSF_CFDC_OPTION_FULL_ACCESS) &&
4186 (option != FSF_CFDC_OPTION_RESTRICTED_ACCESS))
4187 req_flags = ZFCP_WAIT_FOR_SBAL;
4188 break;
4189
4190 case FSF_QTCB_UPLOAD_CONTROL_FILE:
4191 direction = SBAL_FLAGS0_TYPE_READ;
4192 break;
4193
4194 default:
4195 ZFCP_LOG_INFO("Invalid FSF command code 0x%08x\n", fsf_command);
4196 retval = -EINVAL;
4197 goto out;
4198 }
4199
4200 retval = zfcp_fsf_req_create(adapter, fsf_command, req_flags,
4201 NULL, &lock_flags, &fsf_req);
4202 if (retval < 0) {
4203 ZFCP_LOG_INFO("error: Could not create FSF request for the "
4204 "adapter %s\n",
4205 zfcp_get_busid_by_adapter(adapter));
4206 retval = -EPERM;
4207 goto unlock_queue_lock;
4208 }
4209
4210 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
4211 sbale[0].flags |= direction;
4212
4213 bottom = &fsf_req->qtcb->bottom.support;
4214 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
4215 bottom->option = option;
4216
4217 if (sg_list->count > 0) {
4218 int bytes;
4219
4220 bytes = zfcp_qdio_sbals_from_sg(fsf_req, direction,
4221 sg_list->sg, sg_list->count,
4222 ZFCP_MAX_SBALS_PER_REQ);
4223 if (bytes != ZFCP_CFDC_MAX_CONTROL_FILE_SIZE) {
4224 ZFCP_LOG_INFO(
4225 "error: Could not create sufficient number of "
4226 "SBALS for an FSF request to the adapter %s\n",
4227 zfcp_get_busid_by_adapter(adapter));
4228 retval = -ENOMEM;
4229 goto free_fsf_req;
4230 }
4231 } else
4232 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
4233
4234 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
4235 retval = zfcp_fsf_req_send(fsf_req);
4236 if (retval < 0) {
4237 ZFCP_LOG_INFO("initiation of cfdc up/download failed"
4238 "(adapter %s)\n",
4239 zfcp_get_busid_by_adapter(adapter));
4240 retval = -EPERM;
4241 goto free_fsf_req;
4242 }
4243 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
4244
4245 ZFCP_LOG_NORMAL("Control file %s FSF request has been sent to the "
4246 "adapter %s\n",
4247 fsf_command == FSF_QTCB_DOWNLOAD_CONTROL_FILE ?
4248 "download" : "upload",
4249 zfcp_get_busid_by_adapter(adapter));
4250
4251 wait_event(fsf_req->completion_wq,
4252 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
4253
4254 *fsf_req_ptr = fsf_req;
4255 goto out;
4256
4257 free_fsf_req:
4258 zfcp_fsf_req_free(fsf_req);
4259 unlock_queue_lock:
4260 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
4261 out:
4262 return retval;
4263}
4264
4265 2131
4266/* 2132 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
4267 * function: zfcp_fsf_control_file_handler 2133 unit = req->data;
4268 * 2134 else
4269 * purpose: Handler of the control file upload/download FSF requests 2135 unit = req->unit;
4270 *
4271 * returns: 0 - FSF request successfuly processed
4272 * -EAGAIN - Operation has to be repeated because of a temporary problem
4273 * -EACCES - There is no permission to execute an operation
4274 * -EPERM - The control file is not in a right format
4275 * -EIO - There is a problem with the FCP adapter
4276 * -EINVAL - Invalid operation
4277 * -EFAULT - User space memory I/O operation fault
4278 */
4279static int
4280zfcp_fsf_control_file_handler(struct zfcp_fsf_req *fsf_req)
4281{
4282 struct zfcp_adapter *adapter = fsf_req->adapter;
4283 struct fsf_qtcb_header *header = &fsf_req->qtcb->header;
4284 struct fsf_qtcb_bottom_support *bottom = &fsf_req->qtcb->bottom.support;
4285 int retval = 0;
4286 2136
4287 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 2137 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
4288 retval = -EINVAL;
4289 goto skip_fsfstatus; 2138 goto skip_fsfstatus;
4290 }
4291 2139
4292 switch (header->fsf_status) { 2140 switch (header->fsf_status) {
4293 2141 case FSF_HANDLE_MISMATCH:
4294 case FSF_GOOD: 2142 case FSF_PORT_HANDLE_NOT_VALID:
4295 ZFCP_LOG_NORMAL( 2143 zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, req);
4296 "The FSF request has been successfully completed " 2144 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4297 "on the adapter %s\n",
4298 zfcp_get_busid_by_adapter(adapter));
4299 break;
4300
4301 case FSF_OPERATION_PARTIALLY_SUCCESSFUL:
4302 if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE) {
4303 switch (header->fsf_status_qual.word[0]) {
4304
4305 case FSF_SQ_CFDC_HARDENED_ON_SE:
4306 ZFCP_LOG_NORMAL(
4307 "CFDC on the adapter %s has being "
4308 "hardened on primary and secondary SE\n",
4309 zfcp_get_busid_by_adapter(adapter));
4310 break;
4311
4312 case FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE:
4313 ZFCP_LOG_NORMAL(
4314 "CFDC of the adapter %s could not "
4315 "be saved on the SE\n",
4316 zfcp_get_busid_by_adapter(adapter));
4317 break;
4318
4319 case FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2:
4320 ZFCP_LOG_NORMAL(
4321 "CFDC of the adapter %s could not "
4322 "be copied to the secondary SE\n",
4323 zfcp_get_busid_by_adapter(adapter));
4324 break;
4325
4326 default:
4327 ZFCP_LOG_NORMAL(
4328 "CFDC could not be hardened "
4329 "on the adapter %s\n",
4330 zfcp_get_busid_by_adapter(adapter));
4331 }
4332 }
4333 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4334 retval = -EAGAIN;
4335 break;
4336
4337 case FSF_AUTHORIZATION_FAILURE:
4338 ZFCP_LOG_NORMAL(
4339 "Adapter %s does not accept privileged commands\n",
4340 zfcp_get_busid_by_adapter(adapter));
4341 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4342 retval = -EACCES;
4343 break; 2145 break;
4344 2146 case FSF_FCPLUN_NOT_VALID:
4345 case FSF_CFDC_ERROR_DETECTED: 2147 case FSF_LUN_HANDLE_NOT_VALID:
4346 ZFCP_LOG_NORMAL( 2148 zfcp_erp_port_reopen(unit->port, 0, 113, req);
4347 "Error at position %d in the CFDC, " 2149 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4348 "CFDC is discarded by the adapter %s\n",
4349 header->fsf_status_qual.word[0],
4350 zfcp_get_busid_by_adapter(adapter));
4351 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4352 retval = -EPERM;
4353 break; 2150 break;
4354 2151 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
4355 case FSF_CONTROL_FILE_UPDATE_ERROR: 2152 zfcp_fsf_class_not_supp(req);
4356 ZFCP_LOG_NORMAL(
4357 "Adapter %s cannot harden the control file, "
4358 "file is discarded\n",
4359 zfcp_get_busid_by_adapter(adapter));
4360 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4361 retval = -EIO;
4362 break; 2153 break;
4363 2154 case FSF_ACCESS_DENIED:
4364 case FSF_CONTROL_FILE_TOO_LARGE: 2155 zfcp_fsf_access_denied_unit(req, unit);
4365 ZFCP_LOG_NORMAL(
4366 "Control file is too large, file is discarded "
4367 "by the adapter %s\n",
4368 zfcp_get_busid_by_adapter(adapter));
4369 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4370 retval = -EIO;
4371 break; 2156 break;
4372 2157 case FSF_DIRECTION_INDICATOR_NOT_VALID:
4373 case FSF_ACCESS_CONFLICT_DETECTED: 2158 dev_err(&req->adapter->ccw_device->dev,
4374 if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE) 2159 "Invalid data direction (%d) given for unit "
4375 ZFCP_LOG_NORMAL( 2160 "0x%016Lx on port 0x%016Lx, shutting down "
4376 "CFDC has been discarded by the adapter %s, " 2161 "adapter.\n",
4377 "because activation would impact " 2162 req->qtcb->bottom.io.data_direction,
4378 "%d active connection(s)\n", 2163 unit->fcp_lun, unit->port->wwpn);
4379 zfcp_get_busid_by_adapter(adapter), 2164 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req);
4380 header->fsf_status_qual.word[0]); 2165 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4381 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4382 retval = -EIO;
4383 break; 2166 break;
4384 2167 case FSF_CMND_LENGTH_NOT_VALID:
4385 case FSF_CONFLICTS_OVERRULED: 2168 dev_err(&req->adapter->ccw_device->dev,
4386 if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE) 2169 "An invalid control-data-block length field (%d) "
4387 ZFCP_LOG_NORMAL( 2170 "was found in a command for unit 0x%016Lx on port "
4388 "CFDC has been activated on the adapter %s, " 2171 "0x%016Lx. Shutting down adapter.\n",
4389 "but activation has impacted " 2172 req->qtcb->bottom.io.fcp_cmnd_length,
4390 "%d active connection(s)\n", 2173 unit->fcp_lun, unit->port->wwpn);
4391 zfcp_get_busid_by_adapter(adapter), 2174 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req);
4392 header->fsf_status_qual.word[0]); 2175 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4393 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4394 retval = -EIO;
4395 break; 2176 break;
4396 2177 case FSF_PORT_BOXED:
4397 case FSF_UNKNOWN_OP_SUBTYPE: 2178 zfcp_erp_port_boxed(unit->port, 53, req);
4398 ZFCP_LOG_NORMAL("unknown operation subtype (adapter: %s, " 2179 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
4399 "op_subtype=0x%x)\n", 2180 ZFCP_STATUS_FSFREQ_RETRY;
4400 zfcp_get_busid_by_adapter(adapter),
4401 bottom->operation_subtype);
4402 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4403 retval = -EINVAL;
4404 break; 2181 break;
4405 2182 case FSF_LUN_BOXED:
4406 case FSF_INVALID_COMMAND_OPTION: 2183 zfcp_erp_unit_boxed(unit, 54, req);
4407 ZFCP_LOG_NORMAL( 2184 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
4408 "Invalid option 0x%x has been specified " 2185 ZFCP_STATUS_FSFREQ_RETRY;
4409 "in QTCB bottom sent to the adapter %s\n",
4410 bottom->option,
4411 zfcp_get_busid_by_adapter(adapter));
4412 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4413 retval = -EINVAL;
4414 break; 2186 break;
4415 2187 case FSF_ADAPTER_STATUS_AVAILABLE:
4416 default: 2188 if (header->fsf_status_qual.word[0] ==
4417 ZFCP_LOG_NORMAL( 2189 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
4418 "bug: An unknown/unexpected FSF status 0x%08x " 2190 zfcp_test_link(unit->port);
4419 "was presented on the adapter %s\n", 2191 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4420 header->fsf_status,
4421 zfcp_get_busid_by_adapter(adapter));
4422 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4423 retval = -EINVAL;
4424 break; 2192 break;
4425 } 2193 }
4426
4427skip_fsfstatus: 2194skip_fsfstatus:
4428 return retval; 2195 if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
4429} 2196 zfcp_fsf_send_fcp_ctm_handler(req);
4430 2197 else {
4431static inline int 2198 zfcp_fsf_send_fcp_command_task_handler(req);
4432zfcp_fsf_req_sbal_check(unsigned long *flags, 2199 req->unit = NULL;
4433 struct zfcp_qdio_queue *queue, int needed) 2200 zfcp_unit_put(unit);
4434{
4435 write_lock_irqsave(&queue->queue_lock, *flags);
4436 if (likely(atomic_read(&queue->free_count) >= needed))
4437 return 1;
4438 write_unlock_irqrestore(&queue->queue_lock, *flags);
4439 return 0;
4440}
4441
4442/*
4443 * set qtcb pointer in fsf_req and initialize QTCB
4444 */
4445static void
4446zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
4447{
4448 if (likely(fsf_req->qtcb != NULL)) {
4449 fsf_req->qtcb->prefix.req_seq_no =
4450 fsf_req->adapter->fsf_req_seq_no;
4451 fsf_req->qtcb->prefix.req_id = fsf_req->req_id;
4452 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
4453 fsf_req->qtcb->prefix.qtcb_type =
4454 fsf_qtcb_type[fsf_req->fsf_command];
4455 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
4456 fsf_req->qtcb->header.req_handle = fsf_req->req_id;
4457 fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command;
4458 } 2201 }
4459} 2202}
4460 2203
4461/** 2204/**
4462 * zfcp_fsf_req_sbal_get - try to get one SBAL in the request queue 2205 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
4463 * @adapter: adapter for which request queue is examined 2206 * @adapter: adapter where scsi command is issued
4464 * @req_flags: flags indicating whether to wait for needed SBAL or not 2207 * @unit: unit where command is sent to
4465 * @lock_flags: lock_flags if queue_lock is taken 2208 * @scsi_cmnd: scsi command to be sent
4466 * Return: 0 on success, otherwise -EIO, or -ERESTARTSYS 2209 * @timer: timer to be started when request is initiated
4467 * Locks: lock adapter->request_queue->queue_lock on success 2210 * @req_flags: flags for fsf_request
4468 */
4469static int
4470zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter, int req_flags,
4471 unsigned long *lock_flags)
4472{
4473 long ret;
4474 struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
4475
4476 if (unlikely(req_flags & ZFCP_WAIT_FOR_SBAL)) {
4477 ret = wait_event_interruptible_timeout(adapter->request_wq,
4478 zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1),
4479 ZFCP_SBAL_TIMEOUT);
4480 if (ret < 0)
4481 return ret;
4482 if (!ret)
4483 return -EIO;
4484 } else if (!zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1))
4485 return -EIO;
4486
4487 return 0;
4488}
4489
4490/*
4491 * function: zfcp_fsf_req_create
4492 *
4493 * purpose: create an FSF request at the specified adapter and
4494 * setup common fields
4495 *
4496 * returns: -ENOMEM if there was insufficient memory for a request
4497 * -EIO if no qdio buffers could be allocate to the request
4498 * -EINVAL/-EPERM on bug conditions in req_dequeue
4499 * 0 in success
4500 *
4501 * note: The created request is returned by reference.
4502 *
4503 * locks: lock of concerned request queue must not be held,
4504 * but is held on completion (write, irqsave)
4505 */ 2211 */
4506int 2212int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
4507zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, 2213 struct zfcp_unit *unit,
4508 mempool_t *pool, unsigned long *lock_flags, 2214 struct scsi_cmnd *scsi_cmnd,
4509 struct zfcp_fsf_req **fsf_req_p) 2215 int use_timer, int req_flags)
4510{ 2216{
4511 volatile struct qdio_buffer_element *sbale; 2217 struct zfcp_fsf_req *req;
4512 struct zfcp_fsf_req *fsf_req = NULL; 2218 struct fcp_cmnd_iu *fcp_cmnd_iu;
4513 int ret = 0; 2219 unsigned int sbtype;
4514 struct zfcp_qdio_queue *req_queue = &adapter->request_queue; 2220 int real_bytes, retval = -EIO;
4515
4516 /* allocate new FSF request */
4517 fsf_req = zfcp_fsf_req_alloc(pool, req_flags);
4518 if (unlikely(NULL == fsf_req)) {
4519 ZFCP_LOG_DEBUG("error: Could not put an FSF request into "
4520 "the outbound (send) queue.\n");
4521 ret = -ENOMEM;
4522 goto failed_fsf_req;
4523 }
4524
4525 fsf_req->adapter = adapter;
4526 fsf_req->fsf_command = fsf_cmd;
4527 INIT_LIST_HEAD(&fsf_req->list);
4528 init_timer(&fsf_req->timer);
4529 2221
4530 /* initialize waitqueue which may be used to wait on 2222 if (unlikely(!(atomic_read(&unit->status) &
4531 this request completion */ 2223 ZFCP_STATUS_COMMON_UNBLOCKED)))
4532 init_waitqueue_head(&fsf_req->completion_wq); 2224 return -EBUSY;
4533 2225
4534 ret = zfcp_fsf_req_sbal_get(adapter, req_flags, lock_flags); 2226 spin_lock(&adapter->req_q.lock);
4535 if (ret < 0) 2227 if (!atomic_read(&adapter->req_q.count))
4536 goto failed_sbals; 2228 goto out;
2229 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2230 adapter->pool.fsf_req_scsi);
2231 if (unlikely(IS_ERR(req))) {
2232 retval = PTR_ERR(req);
2233 goto out;
2234 }
4537 2235
4538 /* this is serialized (we are holding req_queue-lock of adapter) */ 2236 zfcp_unit_get(unit);
4539 if (adapter->req_no == 0) 2237 req->unit = unit;
4540 adapter->req_no++; 2238 req->data = scsi_cmnd;
4541 fsf_req->req_id = adapter->req_no++; 2239 req->handler = zfcp_fsf_send_fcp_command_handler;
2240 req->qtcb->header.lun_handle = unit->handle;
2241 req->qtcb->header.port_handle = unit->port->handle;
2242 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
4542 2243
4543 zfcp_fsf_req_qtcb_init(fsf_req); 2244 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
4544 2245
2246 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
2247 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
4545 /* 2248 /*
4546 * We hold queue_lock here. Check if QDIOUP is set and let request fail 2249 * set depending on data direction:
4547 * if it is not set (see also *_open_qdio and *_close_qdio). 2250 * data direction bits in SBALE (SB Type)
2251 * data direction bits in QTCB
2252 * data direction bits in FCP_CMND IU
4548 */ 2253 */
4549 2254 switch (scsi_cmnd->sc_data_direction) {
4550 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) { 2255 case DMA_NONE:
4551 write_unlock_irqrestore(&req_queue->queue_lock, *lock_flags); 2256 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
4552 ret = -EIO; 2257 sbtype = SBAL_FLAGS0_TYPE_READ;
4553 goto failed_sbals; 2258 break;
2259 case DMA_FROM_DEVICE:
2260 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
2261 sbtype = SBAL_FLAGS0_TYPE_READ;
2262 fcp_cmnd_iu->rddata = 1;
2263 break;
2264 case DMA_TO_DEVICE:
2265 req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
2266 sbtype = SBAL_FLAGS0_TYPE_WRITE;
2267 fcp_cmnd_iu->wddata = 1;
2268 break;
2269 case DMA_BIDIRECTIONAL:
2270 default:
2271 retval = -EIO;
2272 goto failed_scsi_cmnd;
4554 } 2273 }
4555 2274
4556 if (fsf_req->qtcb) { 2275 if (likely((scsi_cmnd->device->simple_tags) ||
4557 fsf_req->seq_no = adapter->fsf_req_seq_no; 2276 ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) &&
4558 fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; 2277 (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
4559 } 2278 fcp_cmnd_iu->task_attribute = SIMPLE_Q;
4560 fsf_req->sbal_number = 1; 2279 else
4561 fsf_req->sbal_first = req_queue->free_index; 2280 fcp_cmnd_iu->task_attribute = UNTAGGED;
4562 fsf_req->sbal_curr = req_queue->free_index;
4563 fsf_req->sbale_curr = 1;
4564 2281
4565 if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP)) { 2282 if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
4566 fsf_req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2283 fcp_cmnd_iu->add_fcp_cdb_length =
4567 } 2284 (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
4568 2285
4569 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 2286 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4570 2287
4571 /* setup common SBALE fields */ 2288 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
4572 sbale[0].addr = (void *) fsf_req->req_id; 2289 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(fcp_dl_t);
4573 sbale[0].flags |= SBAL_FLAGS0_COMMAND; 2290
4574 if (likely(fsf_req->qtcb != NULL)) { 2291 real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype,
4575 sbale[1].addr = (void *) fsf_req->qtcb; 2292 scsi_sglist(scsi_cmnd),
4576 sbale[1].length = sizeof(struct fsf_qtcb); 2293 FSF_MAX_SBALS_PER_REQ);
2294 if (unlikely(real_bytes < 0)) {
2295 if (req->sbal_number < FSF_MAX_SBALS_PER_REQ)
2296 retval = -EIO;
2297 else {
2298 dev_err(&adapter->ccw_device->dev,
2299 "SCSI request too large. "
2300 "Shutting down unit 0x%016Lx on port "
2301 "0x%016Lx.\n", unit->fcp_lun,
2302 unit->port->wwpn);
2303 zfcp_erp_unit_shutdown(unit, 0, 131, req);
2304 retval = -EINVAL;
2305 }
2306 goto failed_scsi_cmnd;
4577 } 2307 }
4578 2308
4579 ZFCP_LOG_TRACE("got %i free BUFFERs starting at index %i\n", 2309 zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
4580 fsf_req->sbal_number, fsf_req->sbal_first);
4581 2310
4582 goto success; 2311 if (use_timer)
2312 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
4583 2313
4584 failed_sbals: 2314 retval = zfcp_fsf_req_send(req);
4585/* dequeue new FSF request previously enqueued */ 2315 if (unlikely(retval))
4586 zfcp_fsf_req_free(fsf_req); 2316 goto failed_scsi_cmnd;
4587 fsf_req = NULL;
4588 2317
4589 failed_fsf_req: 2318 goto out;
4590 write_lock_irqsave(&req_queue->queue_lock, *lock_flags); 2319
4591 success: 2320failed_scsi_cmnd:
4592 *fsf_req_p = fsf_req; 2321 zfcp_unit_put(unit);
4593 return ret; 2322 zfcp_fsf_req_free(req);
2323 scsi_cmnd->host_scribble = NULL;
2324out:
2325 spin_unlock(&adapter->req_q.lock);
2326 return retval;
4594} 2327}
4595 2328
4596/* 2329/**
4597 * function: zfcp_fsf_req_send 2330 * zfcp_fsf_send_fcp_ctm - send SCSI task management command
4598 * 2331 * @adapter: pointer to struct zfcp-adapter
4599 * purpose: start transfer of FSF request via QDIO 2332 * @unit: pointer to struct zfcp_unit
4600 * 2333 * @tm_flags: unsigned byte for task management flags
4601 * returns: 0 - request transfer succesfully started 2334 * @req_flags: int request flags
4602 * !0 - start of request transfer failed 2335 * Returns: on success pointer to struct fsf_req, NULL otherwise
4603 */ 2336 */
4604static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req) 2337struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2338 struct zfcp_unit *unit,
2339 u8 tm_flags, int req_flags)
4605{ 2340{
4606 struct zfcp_adapter *adapter;
4607 struct zfcp_qdio_queue *req_queue;
4608 volatile struct qdio_buffer_element *sbale; 2341 volatile struct qdio_buffer_element *sbale;
4609 int inc_seq_no; 2342 struct zfcp_fsf_req *req = NULL;
4610 int new_distance_from_int; 2343 struct fcp_cmnd_iu *fcp_cmnd_iu;
4611 int retval = 0;
4612 2344
4613 adapter = fsf_req->adapter; 2345 if (unlikely(!(atomic_read(&unit->status) &
4614 req_queue = &adapter->request_queue, 2346 ZFCP_STATUS_COMMON_UNBLOCKED)))
2347 return NULL;
4615 2348
2349 spin_lock(&adapter->req_q.lock);
2350 if (!atomic_read(&adapter->req_q.count))
2351 goto out;
2352 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2353 adapter->pool.fsf_req_scsi);
2354 if (unlikely(IS_ERR(req)))
2355 goto out;
4616 2356
4617 /* FIXME(debug): remove it later */ 2357 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
4618 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_first, 0); 2358 req->data = unit;
4619 ZFCP_LOG_DEBUG("SBALE0 flags=0x%x\n", sbale[0].flags); 2359 req->handler = zfcp_fsf_send_fcp_command_handler;
4620 ZFCP_LOG_TRACE("HEX DUMP OF SBALE1 PAYLOAD:\n"); 2360 req->qtcb->header.lun_handle = unit->handle;
4621 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, 2361 req->qtcb->header.port_handle = unit->port->handle;
4622 sbale[1].length); 2362 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2363 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2364 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2365 sizeof(fcp_dl_t);
2366
2367 sbale = zfcp_qdio_sbale_req(req);
2368 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
2369 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
4623 2370
4624 /* put allocated FSF request into hash table */ 2371 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd;
4625 spin_lock(&adapter->req_list_lock); 2372 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
4626 zfcp_reqlist_add(adapter, fsf_req); 2373 fcp_cmnd_iu->task_management_flags = tm_flags;
4627 spin_unlock(&adapter->req_list_lock);
4628 2374
4629 inc_seq_no = (fsf_req->qtcb != NULL); 2375 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2376 if (!zfcp_fsf_req_send(req))
2377 goto out;
4630 2378
4631 ZFCP_LOG_TRACE("request queue of adapter %s: " 2379 zfcp_fsf_req_free(req);
4632 "next free SBAL is %i, %i free SBALs\n", 2380 req = NULL;
4633 zfcp_get_busid_by_adapter(adapter), 2381out:
4634 req_queue->free_index, 2382 spin_unlock(&adapter->req_q.lock);
4635 atomic_read(&req_queue->free_count)); 2383 return req;
2384}
4636 2385
4637 ZFCP_LOG_DEBUG("calling do_QDIO adapter %s, flags=0x%x, queue_no=%i, " 2386static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
4638 "index_in_queue=%i, count=%i, buffers=%p\n", 2387{
4639 zfcp_get_busid_by_adapter(adapter), 2388 if (req->qtcb->header.fsf_status != FSF_GOOD)
4640 QDIO_FLAG_SYNC_OUTPUT, 2389 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
4641 0, fsf_req->sbal_first, fsf_req->sbal_number, 2390}
4642 &req_queue->buffer[fsf_req->sbal_first]);
4643 2391
4644 /* 2392/**
4645 * adjust the number of free SBALs in request queue as well as 2393 * zfcp_fsf_control_file - control file upload/download
4646 * position of first one 2394 * @adapter: pointer to struct zfcp_adapter
4647 */ 2395 * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
4648 atomic_sub(fsf_req->sbal_number, &req_queue->free_count); 2396 * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
4649 ZFCP_LOG_TRACE("free_count=%d\n", atomic_read(&req_queue->free_count)); 2397 */
4650 req_queue->free_index += fsf_req->sbal_number; /* increase */ 2398struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4651 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap if needed */ 2399 struct zfcp_fsf_cfdc *fsf_cfdc)
4652 new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req); 2400{
2401 volatile struct qdio_buffer_element *sbale;
2402 struct zfcp_fsf_req *req = NULL;
2403 struct fsf_qtcb_bottom_support *bottom;
2404 int direction, retval = -EIO, bytes;
2405
2406 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2407 return ERR_PTR(-EOPNOTSUPP);
2408
2409 switch (fsf_cfdc->command) {
2410 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2411 direction = SBAL_FLAGS0_TYPE_WRITE;
2412 break;
2413 case FSF_QTCB_UPLOAD_CONTROL_FILE:
2414 direction = SBAL_FLAGS0_TYPE_READ;
2415 break;
2416 default:
2417 return ERR_PTR(-EINVAL);
2418 }
4653 2419
4654 fsf_req->issued = get_clock(); 2420 spin_lock(&adapter->req_q.lock);
2421 if (zfcp_fsf_req_sbal_get(adapter))
2422 goto out;
4655 2423
4656 retval = do_QDIO(adapter->ccw_device, 2424 req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL);
4657 QDIO_FLAG_SYNC_OUTPUT, 2425 if (unlikely(IS_ERR(req))) {
4658 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); 2426 retval = -EPERM;
2427 goto out;
2428 }
4659 2429
4660 if (unlikely(retval)) { 2430 req->handler = zfcp_fsf_control_file_handler;
4661 /* Queues are down..... */ 2431
4662 retval = -EIO; 2432 sbale = zfcp_qdio_sbale_req(req);
4663 del_timer(&fsf_req->timer); 2433 sbale[0].flags |= direction;
4664 spin_lock(&adapter->req_list_lock);
4665 zfcp_reqlist_remove(adapter, fsf_req);
4666 spin_unlock(&adapter->req_list_lock);
4667 /* undo changes in request queue made for this request */
4668 zfcp_qdio_zero_sbals(req_queue->buffer,
4669 fsf_req->sbal_first, fsf_req->sbal_number);
4670 atomic_add(fsf_req->sbal_number, &req_queue->free_count);
4671 req_queue->free_index -= fsf_req->sbal_number;
4672 req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q;
4673 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
4674 zfcp_erp_adapter_reopen(adapter, 0, 116, fsf_req);
4675 } else {
4676 req_queue->distance_from_int = new_distance_from_int;
4677 /*
4678 * increase FSF sequence counter -
4679 * this must only be done for request successfully enqueued to
4680 * QDIO this rejected requests may be cleaned up by calling
4681 * routines resulting in missing sequence counter values
4682 * otherwise,
4683 */
4684 2434
4685 /* Don't increase for unsolicited status */ 2435 bottom = &req->qtcb->bottom.support;
4686 if (inc_seq_no) 2436 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
4687 adapter->fsf_req_seq_no++; 2437 bottom->option = fsf_cfdc->option;
4688 2438
4689 /* count FSF requests pending */ 2439 bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg,
4690 atomic_inc(&adapter->reqs_active); 2440 FSF_MAX_SBALS_PER_REQ);
2441 if (bytes != ZFCP_CFDC_MAX_SIZE) {
2442 retval = -ENOMEM;
2443 zfcp_fsf_req_free(req);
2444 goto out;
4691 } 2445 }
4692 return retval;
4693}
4694 2446
4695#undef ZFCP_LOG_AREA 2447 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2448 retval = zfcp_fsf_req_send(req);
2449out:
2450 spin_unlock(&adapter->req_q.lock);
2451
2452 if (!retval) {
2453 wait_event(req->completion_wq,
2454 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
2455 return req;
2456 }
2457 return ERR_PTR(retval);
2458}
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 099970b27001..bf94b4da0763 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -1,27 +1,16 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * Interface to the FSF support functions.
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22#ifndef FSF_H 9#ifndef FSF_H
23#define FSF_H 10#define FSF_H
24 11
12#include <linux/pfn.h>
13
25#define FSF_QTCB_CURRENT_VERSION 0x00000001 14#define FSF_QTCB_CURRENT_VERSION 0x00000001
26 15
27/* FSF commands */ 16/* FSF commands */
@@ -258,6 +247,16 @@
258#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000 247#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000
259#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000 248#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000
260 249
250/* FSF interface for CFDC */
251#define ZFCP_CFDC_MAX_SIZE 127 * 1024
252#define ZFCP_CFDC_PAGES PFN_UP(ZFCP_CFDC_MAX_SIZE)
253
254struct zfcp_fsf_cfdc {
255 struct scatterlist sg[ZFCP_CFDC_PAGES];
256 u32 command;
257 u32 option;
258};
259
261struct fsf_queue_designator { 260struct fsf_queue_designator {
262 u8 cssid; 261 u8 cssid;
263 u8 chpid; 262 u8 chpid;
@@ -288,6 +287,18 @@ struct fsf_bit_error_payload {
288 u32 current_transmit_b2b_credit; 287 u32 current_transmit_b2b_credit;
289} __attribute__ ((packed)); 288} __attribute__ ((packed));
290 289
290struct fsf_link_down_info {
291 u32 error_code;
292 u32 res1;
293 u8 res2[2];
294 u8 primary_status;
295 u8 ioerr_code;
296 u8 action_code;
297 u8 reason_code;
298 u8 explanation_code;
299 u8 vendor_specific_code;
300} __attribute__ ((packed));
301
291struct fsf_status_read_buffer { 302struct fsf_status_read_buffer {
292 u32 status_type; 303 u32 status_type;
293 u32 status_subtype; 304 u32 status_subtype;
@@ -298,7 +309,12 @@ struct fsf_status_read_buffer {
298 u32 class; 309 u32 class;
299 u64 fcp_lun; 310 u64 fcp_lun;
300 u8 res3[24]; 311 u8 res3[24];
301 u8 payload[FSF_STATUS_READ_PAYLOAD_SIZE]; 312 union {
313 u8 data[FSF_STATUS_READ_PAYLOAD_SIZE];
314 u32 word[FSF_STATUS_READ_PAYLOAD_SIZE/sizeof(u32)];
315 struct fsf_link_down_info link_down_info;
316 struct fsf_bit_error_payload bit_error;
317 } payload;
302} __attribute__ ((packed)); 318} __attribute__ ((packed));
303 319
304struct fsf_qual_version_error { 320struct fsf_qual_version_error {
@@ -311,23 +327,19 @@ struct fsf_qual_sequence_error {
311 u32 res1[3]; 327 u32 res1[3];
312} __attribute__ ((packed)); 328} __attribute__ ((packed));
313 329
314struct fsf_link_down_info { 330struct fsf_qual_latency_info {
315 u32 error_code; 331 u32 channel_lat;
316 u32 res1; 332 u32 fabric_lat;
317 u8 res2[2]; 333 u8 res1[8];
318 u8 primary_status;
319 u8 ioerr_code;
320 u8 action_code;
321 u8 reason_code;
322 u8 explanation_code;
323 u8 vendor_specific_code;
324} __attribute__ ((packed)); 334} __attribute__ ((packed));
325 335
326union fsf_prot_status_qual { 336union fsf_prot_status_qual {
337 u32 word[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u32)];
327 u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)]; 338 u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)];
328 struct fsf_qual_version_error version_error; 339 struct fsf_qual_version_error version_error;
329 struct fsf_qual_sequence_error sequence_error; 340 struct fsf_qual_sequence_error sequence_error;
330 struct fsf_link_down_info link_down_info; 341 struct fsf_link_down_info link_down_info;
342 struct fsf_qual_latency_info latency_info;
331} __attribute__ ((packed)); 343} __attribute__ ((packed));
332 344
333struct fsf_qtcb_prefix { 345struct fsf_qtcb_prefix {
@@ -437,7 +449,9 @@ struct fsf_qtcb_bottom_config {
437 u32 fc_link_speed; 449 u32 fc_link_speed;
438 u32 adapter_type; 450 u32 adapter_type;
439 u32 peer_d_id; 451 u32 peer_d_id;
440 u8 res2[12]; 452 u8 res1[2];
453 u16 timer_interval;
454 u8 res2[8];
441 u32 s_id; 455 u32 s_id;
442 struct fsf_nport_serv_param nport_serv_param; 456 struct fsf_nport_serv_param nport_serv_param;
443 u8 reserved_nport_serv_param[16]; 457 u8 reserved_nport_serv_param[16];
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 8ca5f074c687..d6dbd653fde9 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -1,241 +1,101 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * Setup and helper functions to access QDIO.
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22#include "zfcp_ext.h" 9#include "zfcp_ext.h"
23 10
24static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); 11/* FIXME(tune): free space should be one max. SBAL chain plus what? */
25static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get 12#define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \
26 (struct zfcp_qdio_queue *, int, int); 13 - (FSF_MAX_SBALS_PER_REQ + 4))
27static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp 14#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
28 (struct zfcp_fsf_req *, int, int);
29static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain
30 (struct zfcp_fsf_req *, unsigned long);
31static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next
32 (struct zfcp_fsf_req *, unsigned long);
33static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int);
34static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *);
35static void zfcp_qdio_sbale_fill
36 (struct zfcp_fsf_req *, unsigned long, void *, int);
37static int zfcp_qdio_sbals_from_segment
38 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long);
39
40static qdio_handler_t zfcp_qdio_request_handler;
41static qdio_handler_t zfcp_qdio_response_handler;
42static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
43 unsigned int, unsigned int, unsigned int, int, int);
44
45#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
46
47/*
48 * Frees BUFFER memory for each of the pointers of the struct qdio_buffer array
49 * in the adapter struct sbuf is the pointer array.
50 *
51 * locks: must only be called with zfcp_data.config_sema taken
52 */
53static void
54zfcp_qdio_buffers_dequeue(struct qdio_buffer **sbuf)
55{
56 int pos;
57
58 for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE)
59 free_page((unsigned long) sbuf[pos]);
60}
61 15
62/* 16static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
63 * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t
64 * array in the adapter struct.
65 * Cur_buf is the pointer array
66 *
67 * returns: zero on success else -ENOMEM
68 * locks: must only be called with zfcp_data.config_sema taken
69 */
70static int
71zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbuf)
72{ 17{
73 int pos; 18 int pos;
74 19
75 for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) { 20 for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
76 sbuf[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL); 21 sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
77 if (!sbuf[pos]) { 22 if (!sbal[pos])
78 zfcp_qdio_buffers_dequeue(sbuf);
79 return -ENOMEM; 23 return -ENOMEM;
80 }
81 } 24 }
82 for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++) 25 for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
83 if (pos % QBUFF_PER_PAGE) 26 if (pos % QBUFF_PER_PAGE)
84 sbuf[pos] = sbuf[pos - 1] + 1; 27 sbal[pos] = sbal[pos - 1] + 1;
85 return 0; 28 return 0;
86} 29}
87 30
88/* locks: must only be called with zfcp_data.config_sema taken */ 31static volatile struct qdio_buffer_element *
89int 32zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
90zfcp_qdio_allocate_queues(struct zfcp_adapter *adapter)
91{ 33{
92 int ret; 34 return &q->sbal[sbal_idx]->element[sbale_idx];
93
94 ret = zfcp_qdio_buffers_enqueue(adapter->request_queue.buffer);
95 if (ret)
96 return ret;
97 return zfcp_qdio_buffers_enqueue(adapter->response_queue.buffer);
98} 35}
99 36
100/* locks: must only be called with zfcp_data.config_sema taken */ 37/**
101void 38 * zfcp_qdio_free - free memory used by request- and resposne queue
102zfcp_qdio_free_queues(struct zfcp_adapter *adapter) 39 * @adapter: pointer to the zfcp_adapter structure
40 */
41void zfcp_qdio_free(struct zfcp_adapter *adapter)
103{ 42{
104 ZFCP_LOG_TRACE("freeing request_queue buffers\n"); 43 struct qdio_buffer **sbal_req, **sbal_resp;
105 zfcp_qdio_buffers_dequeue(adapter->request_queue.buffer); 44 int p;
106 45
107 ZFCP_LOG_TRACE("freeing response_queue buffers\n"); 46 if (adapter->ccw_device)
108 zfcp_qdio_buffers_dequeue(adapter->response_queue.buffer); 47 qdio_free(adapter->ccw_device);
109}
110 48
111int 49 sbal_req = adapter->req_q.sbal;
112zfcp_qdio_allocate(struct zfcp_adapter *adapter) 50 sbal_resp = adapter->resp_q.sbal;
113{
114 struct qdio_initialize *init_data;
115 51
116 init_data = &adapter->qdio_init_data; 52 for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
53 free_page((unsigned long) sbal_req[p]);
54 free_page((unsigned long) sbal_resp[p]);
55 }
56}
117 57
118 init_data->cdev = adapter->ccw_device; 58static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id)
119 init_data->q_format = QDIO_SCSI_QFMT; 59{
120 memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8); 60 dev_warn(&adapter->ccw_device->dev, "QDIO problem occurred.\n");
121 ASCEBC(init_data->adapter_name, 8);
122 init_data->qib_param_field_format = 0;
123 init_data->qib_param_field = NULL;
124 init_data->input_slib_elements = NULL;
125 init_data->output_slib_elements = NULL;
126 init_data->min_input_threshold = ZFCP_MIN_INPUT_THRESHOLD;
127 init_data->max_input_threshold = ZFCP_MAX_INPUT_THRESHOLD;
128 init_data->min_output_threshold = ZFCP_MIN_OUTPUT_THRESHOLD;
129 init_data->max_output_threshold = ZFCP_MAX_OUTPUT_THRESHOLD;
130 init_data->no_input_qs = 1;
131 init_data->no_output_qs = 1;
132 init_data->input_handler = zfcp_qdio_response_handler;
133 init_data->output_handler = zfcp_qdio_request_handler;
134 init_data->int_parm = (unsigned long) adapter;
135 init_data->flags = QDIO_INBOUND_0COPY_SBALS |
136 QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
137 init_data->input_sbal_addr_array =
138 (void **) (adapter->response_queue.buffer);
139 init_data->output_sbal_addr_array =
140 (void **) (adapter->request_queue.buffer);
141 61
142 return qdio_allocate(init_data); 62 zfcp_erp_adapter_reopen(adapter,
63 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
64 ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
143} 65}
144 66
145/* 67static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
146 * function: zfcp_qdio_handler_error_check
147 *
148 * purpose: called by the response handler to determine error condition
149 *
150 * returns: error flag
151 *
152 */
153static int
154zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
155 unsigned int qdio_error, unsigned int siga_error,
156 int first_element, int elements_processed)
157{ 68{
158 int retval = 0; 69 int i, sbal_idx;
159 70
160 if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { 71 for (i = first; i < first + cnt; i++) {
161 retval = -EIO; 72 sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
162 73 memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
163 ZFCP_LOG_INFO("QDIO problem occurred (status=0x%x, "
164 "qdio_error=0x%x, siga_error=0x%x)\n",
165 status, qdio_error, siga_error);
166
167 zfcp_hba_dbf_event_qdio(adapter, status, qdio_error, siga_error,
168 first_element, elements_processed);
169 /*
170 * Restarting IO on the failed adapter from scratch.
171 * Since we have been using this adapter, it is save to assume
172 * that it is not failed but recoverable. The card seems to
173 * report link-up events by self-initiated queue shutdown.
174 * That is why we need to clear the link-down flag
175 * which is set again in case we have missed by a mile.
176 */
177 zfcp_erp_adapter_reopen(adapter,
178 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
179 ZFCP_STATUS_COMMON_ERP_FAILED, 140,
180 NULL);
181 } 74 }
182 return retval;
183} 75}
184 76
185/* 77static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
186 * function: zfcp_qdio_request_handler 78 int queue_no, int first, int count,
187 * 79 unsigned long parm)
188 * purpose: is called by QDIO layer for completed SBALs in request queue
189 *
190 * returns: (void)
191 */
192static void
193zfcp_qdio_request_handler(struct ccw_device *ccw_device,
194 unsigned int status,
195 unsigned int qdio_error,
196 unsigned int siga_error,
197 unsigned int queue_number,
198 int first_element,
199 int elements_processed,
200 unsigned long int_parm)
201{ 80{
202 struct zfcp_adapter *adapter; 81 struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
203 struct zfcp_qdio_queue *queue; 82 struct zfcp_qdio_queue *queue = &adapter->req_q;
204
205 adapter = (struct zfcp_adapter *) int_parm;
206 queue = &adapter->request_queue;
207 83
208 ZFCP_LOG_DEBUG("adapter %s, first=%d, elements_processed=%d\n", 84 if (unlikely(qdio_err)) {
209 zfcp_get_busid_by_adapter(adapter), 85 zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
210 first_element, elements_processed); 86 zfcp_qdio_handler_error(adapter, 140);
211 87 return;
212 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error, 88 }
213 siga_error, first_element,
214 elements_processed)))
215 goto out;
216 /*
217 * we stored address of struct zfcp_adapter data structure
218 * associated with irq in int_parm
219 */
220 89
221 /* cleanup all SBALs being program-owned now */ 90 /* cleanup all SBALs being program-owned now */
222 zfcp_qdio_zero_sbals(queue->buffer, first_element, elements_processed); 91 zfcp_qdio_zero_sbals(queue->sbal, first, count);
223 92
224 /* increase free space in outbound queue */ 93 atomic_add(count, &queue->count);
225 atomic_add(elements_processed, &queue->free_count);
226 ZFCP_LOG_DEBUG("free_count=%d\n", atomic_read(&queue->free_count));
227 wake_up(&adapter->request_wq); 94 wake_up(&adapter->request_wq);
228 ZFCP_LOG_DEBUG("elements_processed=%d, free count=%d\n",
229 elements_processed, atomic_read(&queue->free_count));
230 out:
231 return;
232} 95}
233 96
234/**
235 * zfcp_qdio_reqid_check - checks for valid reqids.
236 */
237static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, 97static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
238 unsigned long req_id) 98 unsigned long req_id, int sbal_idx)
239{ 99{
240 struct zfcp_fsf_req *fsf_req; 100 struct zfcp_fsf_req *fsf_req;
241 unsigned long flags; 101 unsigned long flags;
@@ -248,203 +108,114 @@ static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
248 * Unknown request means that we have potentially memory 108 * Unknown request means that we have potentially memory
249 * corruption and must stop the machine immediatly. 109 * corruption and must stop the machine immediatly.
250 */ 110 */
251 panic("error: unknown request id (%ld) on adapter %s.\n", 111 panic("error: unknown request id (%lx) on adapter %s.\n",
252 req_id, zfcp_get_busid_by_adapter(adapter)); 112 req_id, zfcp_get_busid_by_adapter(adapter));
253 113
254 zfcp_reqlist_remove(adapter, fsf_req); 114 zfcp_reqlist_remove(adapter, fsf_req);
255 atomic_dec(&adapter->reqs_active);
256 spin_unlock_irqrestore(&adapter->req_list_lock, flags); 115 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
257 116
258 /* finish the FSF request */ 117 fsf_req->sbal_response = sbal_idx;
259 zfcp_fsf_req_complete(fsf_req); 118 zfcp_fsf_req_complete(fsf_req);
260} 119}
261 120
262/* 121static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
263 * function: zfcp_qdio_response_handler
264 *
265 * purpose: is called by QDIO layer for completed SBALs in response queue
266 *
267 * returns: (void)
268 */
269static void
270zfcp_qdio_response_handler(struct ccw_device *ccw_device,
271 unsigned int status,
272 unsigned int qdio_error,
273 unsigned int siga_error,
274 unsigned int queue_number,
275 int first_element,
276 int elements_processed,
277 unsigned long int_parm)
278{ 122{
279 struct zfcp_adapter *adapter; 123 struct zfcp_qdio_queue *queue = &adapter->resp_q;
280 struct zfcp_qdio_queue *queue; 124 struct ccw_device *cdev = adapter->ccw_device;
281 int buffer_index; 125 u8 count, start = queue->first;
282 int i; 126 unsigned int retval;
283 struct qdio_buffer *buffer;
284 int retval = 0;
285 u8 count;
286 u8 start;
287 volatile struct qdio_buffer_element *buffere = NULL;
288 int buffere_index;
289
290 adapter = (struct zfcp_adapter *) int_parm;
291 queue = &adapter->response_queue;
292
293 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
294 siga_error, first_element,
295 elements_processed)))
296 goto out;
297 127
298 /* 128 count = atomic_read(&queue->count) + processed;
299 * we stored address of struct zfcp_adapter data structure 129
300 * associated with irq in int_parm 130 retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);
301 */ 131
132 if (unlikely(retval)) {
133 atomic_set(&queue->count, count);
134 /* FIXME: Recover this with an adapter reopen? */
135 } else {
136 queue->first += count;
137 queue->first %= QDIO_MAX_BUFFERS_PER_Q;
138 atomic_set(&queue->count, 0);
139 }
140}
141
142static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
143 int queue_no, int first, int count,
144 unsigned long parm)
145{
146 struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
147 struct zfcp_qdio_queue *queue = &adapter->resp_q;
148 volatile struct qdio_buffer_element *sbale;
149 int sbal_idx, sbale_idx, sbal_no;
150
151 if (unlikely(qdio_err)) {
152 zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
153 zfcp_qdio_handler_error(adapter, 147);
154 return;
155 }
302 156
303 buffere = &(queue->buffer[first_element]->element[0]);
304 ZFCP_LOG_DEBUG("first BUFFERE flags=0x%x\n", buffere->flags);
305 /* 157 /*
306 * go through all SBALs from input queue currently 158 * go through all SBALs from input queue currently
307 * returned by QDIO layer 159 * returned by QDIO layer
308 */ 160 */
309 161 for (sbal_no = 0; sbal_no < count; sbal_no++) {
310 for (i = 0; i < elements_processed; i++) { 162 sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
311
312 buffer_index = first_element + i;
313 buffer_index %= QDIO_MAX_BUFFERS_PER_Q;
314 buffer = queue->buffer[buffer_index];
315 163
316 /* go through all SBALEs of SBAL */ 164 /* go through all SBALEs of SBAL */
317 for (buffere_index = 0; 165 for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER;
318 buffere_index < QDIO_MAX_ELEMENTS_PER_BUFFER; 166 sbale_idx++) {
319 buffere_index++) { 167 sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx);
320
321 /* look for QDIO request identifiers in SB */
322 buffere = &buffer->element[buffere_index];
323 zfcp_qdio_reqid_check(adapter, 168 zfcp_qdio_reqid_check(adapter,
324 (unsigned long) buffere->addr); 169 (unsigned long) sbale->addr,
325 170 sbal_idx);
326 /* 171 if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
327 * A single used SBALE per inbound SBALE has been
328 * implemented by QDIO so far. Hope they will
329 * do some optimisation. Will need to change to
330 * unlikely() then.
331 */
332 if (likely(buffere->flags & SBAL_FLAGS_LAST_ENTRY))
333 break; 172 break;
334 }; 173 };
335 174
336 if (unlikely(!(buffere->flags & SBAL_FLAGS_LAST_ENTRY))) { 175 if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
337 ZFCP_LOG_NORMAL("bug: End of inbound data " 176 dev_warn(&adapter->ccw_device->dev,
338 "not marked!\n"); 177 "Protocol violation by adapter. "
339 } 178 "Continuing operations.\n");
340 } 179 }
341 180
342 /* 181 /*
343 * put range of SBALs back to response queue 182 * put range of SBALs back to response queue
344 * (including SBALs which have already been free before) 183 * (including SBALs which have already been free before)
345 */ 184 */
346 count = atomic_read(&queue->free_count) + elements_processed; 185 zfcp_qdio_resp_put_back(adapter, count);
347 start = queue->free_index;
348
349 ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
350 "queue_no=%i, index_in_queue=%i, count=%i, "
351 "buffers=0x%lx\n",
352 zfcp_get_busid_by_adapter(adapter),
353 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
354 0, start, count, (unsigned long) &queue->buffer[start]);
355
356 retval = do_QDIO(ccw_device,
357 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
358 0, start, count, NULL);
359
360 if (unlikely(retval)) {
361 atomic_set(&queue->free_count, count);
362 ZFCP_LOG_DEBUG("clearing of inbound data regions failed, "
363 "queues may be down "
364 "(count=%d, start=%d, retval=%d)\n",
365 count, start, retval);
366 } else {
367 queue->free_index += count;
368 queue->free_index %= QDIO_MAX_BUFFERS_PER_Q;
369 atomic_set(&queue->free_count, 0);
370 ZFCP_LOG_TRACE("%i buffers enqueued to response "
371 "queue at position %i\n", count, start);
372 }
373 out:
374 return;
375} 186}
376 187
377/** 188/**
378 * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue 189 * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
379 * @queue: queue from which SBALE should be returned 190 * @fsf_req: pointer to struct fsf_req
380 * @sbal: specifies number of SBAL in queue 191 * Returns: pointer to qdio_buffer_element (SBALE) structure
381 * @sbale: specifes number of SBALE in SBAL
382 */
383static inline volatile struct qdio_buffer_element *
384zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale)
385{
386 return &queue->buffer[sbal]->element[sbale];
387}
388
389/**
390 * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for
391 * a struct zfcp_fsf_req
392 */ 192 */
393volatile struct qdio_buffer_element * 193volatile struct qdio_buffer_element *
394zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) 194zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
395{ 195{
396 return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, 196 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
397 sbal, sbale);
398} 197}
399 198
400/** 199/**
401 * zfcp_qdio_sbale_resp - return pointer to SBALE of response_queue for 200 * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
402 * a struct zfcp_fsf_req 201 * @fsf_req: pointer to struct fsf_req
403 */ 202 * Returns: pointer to qdio_buffer_element (SBALE) structure
404static inline volatile struct qdio_buffer_element *
405zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
406{
407 return zfcp_qdio_sbale_get(&fsf_req->adapter->response_queue,
408 sbal, sbale);
409}
410
411/**
412 * zfcp_qdio_sbale_curr - return current SBALE on request_queue for
413 * a struct zfcp_fsf_req
414 */ 203 */
415volatile struct qdio_buffer_element * 204volatile struct qdio_buffer_element *
416zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) 205zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
417{ 206{
418 return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 207 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
419 fsf_req->sbale_curr); 208 req->sbale_curr);
420} 209}
421 210
422/** 211static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
423 * zfcp_qdio_sbal_limit - determine maximum number of SBALs that can be used
424 * on the request_queue for a struct zfcp_fsf_req
425 * @fsf_req: the number of the last SBAL that can be used is stored herein
426 * @max_sbals: used to pass an upper limit for the number of SBALs
427 *
428 * Note: We can assume at least one free SBAL in the request_queue when called.
429 */
430static void
431zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
432{ 212{
433 int count = atomic_read(&fsf_req->adapter->request_queue.free_count); 213 int count = atomic_read(&fsf_req->adapter->req_q.count);
434 count = min(count, max_sbals); 214 count = min(count, max_sbals);
435 fsf_req->sbal_last = fsf_req->sbal_first; 215 fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1)
436 fsf_req->sbal_last += (count - 1); 216 % QDIO_MAX_BUFFERS_PER_Q;
437 fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
438} 217}
439 218
440/**
441 * zfcp_qdio_sbal_chain - chain SBALs if more than one SBAL is needed for a
442 * request
443 * @fsf_req: zfcp_fsf_req to be processed
444 * @sbtype: SBAL flags which have to be set in first SBALE of new SBAL
445 *
446 * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req.
447 */
448static volatile struct qdio_buffer_element * 219static volatile struct qdio_buffer_element *
449zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 220zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
450{ 221{
@@ -455,16 +226,16 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
455 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 226 sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
456 227
457 /* don't exceed last allowed SBAL */ 228 /* don't exceed last allowed SBAL */
458 if (fsf_req->sbal_curr == fsf_req->sbal_last) 229 if (fsf_req->sbal_last == fsf_req->sbal_limit)
459 return NULL; 230 return NULL;
460 231
461 /* set chaining flag in first SBALE of current SBAL */ 232 /* set chaining flag in first SBALE of current SBAL */
462 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 233 sbale = zfcp_qdio_sbale_req(fsf_req);
463 sbale->flags |= SBAL_FLAGS0_MORE_SBALS; 234 sbale->flags |= SBAL_FLAGS0_MORE_SBALS;
464 235
465 /* calculate index of next SBAL */ 236 /* calculate index of next SBAL */
466 fsf_req->sbal_curr++; 237 fsf_req->sbal_last++;
467 fsf_req->sbal_curr %= QDIO_MAX_BUFFERS_PER_Q; 238 fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
468 239
469 /* keep this requests number of SBALs up-to-date */ 240 /* keep this requests number of SBALs up-to-date */
470 fsf_req->sbal_number++; 241 fsf_req->sbal_number++;
@@ -479,214 +250,246 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
479 return sbale; 250 return sbale;
480} 251}
481 252
482/**
483 * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed
484 */
485static volatile struct qdio_buffer_element * 253static volatile struct qdio_buffer_element *
486zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 254zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
487{ 255{
488 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) 256 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
489 return zfcp_qdio_sbal_chain(fsf_req, sbtype); 257 return zfcp_qdio_sbal_chain(fsf_req, sbtype);
490
491 fsf_req->sbale_curr++; 258 fsf_req->sbale_curr++;
492
493 return zfcp_qdio_sbale_curr(fsf_req); 259 return zfcp_qdio_sbale_curr(fsf_req);
494} 260}
495 261
496/** 262static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req)
497 * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue
498 * with zero from
499 */
500static int
501zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last)
502{
503 struct qdio_buffer **buf = queue->buffer;
504 int curr = first;
505 int count = 0;
506
507 for(;;) {
508 curr %= QDIO_MAX_BUFFERS_PER_Q;
509 count++;
510 memset(buf[curr], 0, sizeof(struct qdio_buffer));
511 if (curr == last)
512 break;
513 curr++;
514 }
515 return count;
516}
517
518
519/**
520 * zfcp_qdio_sbals_wipe - reset all changes in SBALs for an fsf_req
521 */
522static inline int
523zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req)
524{ 263{
525 return zfcp_qdio_sbals_zero(&fsf_req->adapter->request_queue, 264 struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal;
526 fsf_req->sbal_first, fsf_req->sbal_curr); 265 int first = fsf_req->sbal_first;
266 int last = fsf_req->sbal_last;
267 int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
268 QDIO_MAX_BUFFERS_PER_Q + 1;
269 zfcp_qdio_zero_sbals(sbal, first, count);
527} 270}
528 271
529 272static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
530/** 273 unsigned int sbtype, void *start_addr,
531 * zfcp_qdio_sbale_fill - set address and length in current SBALE 274 unsigned int total_length)
532 * on request_queue
533 */
534static void
535zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
536 void *addr, int length)
537{ 275{
538 volatile struct qdio_buffer_element *sbale; 276 volatile struct qdio_buffer_element *sbale;
539
540 sbale = zfcp_qdio_sbale_curr(fsf_req);
541 sbale->addr = addr;
542 sbale->length = length;
543}
544
545/**
546 * zfcp_qdio_sbals_from_segment - map memory segment to SBALE(s)
547 * @fsf_req: request to be processed
548 * @sbtype: SBALE flags
549 * @start_addr: address of memory segment
550 * @total_length: length of memory segment
551 *
552 * Alignment and length of the segment determine how many SBALEs are needed
553 * for the memory segment.
554 */
555static int
556zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
557 void *start_addr, unsigned long total_length)
558{
559 unsigned long remaining, length; 277 unsigned long remaining, length;
560 void *addr; 278 void *addr;
561 279
562 /* split segment up heeding page boundaries */ 280 /* split segment up */
563 for (addr = start_addr, remaining = total_length; remaining > 0; 281 for (addr = start_addr, remaining = total_length; remaining > 0;
564 addr += length, remaining -= length) { 282 addr += length, remaining -= length) {
565 /* get next free SBALE for new piece */ 283 sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
566 if (NULL == zfcp_qdio_sbale_next(fsf_req, sbtype)) { 284 if (!sbale) {
567 /* no SBALE left, clean up and leave */ 285 zfcp_qdio_undo_sbals(fsf_req);
568 zfcp_qdio_sbals_wipe(fsf_req);
569 return -EINVAL; 286 return -EINVAL;
570 } 287 }
571 /* calculate length of new piece */ 288
289 /* new piece must not exceed next page boundary */
572 length = min(remaining, 290 length = min(remaining,
573 (PAGE_SIZE - ((unsigned long) addr & 291 (PAGE_SIZE - ((unsigned long)addr &
574 (PAGE_SIZE - 1)))); 292 (PAGE_SIZE - 1))));
575 /* fill current SBALE with calculated piece */ 293 sbale->addr = addr;
576 zfcp_qdio_sbale_fill(fsf_req, sbtype, addr, length); 294 sbale->length = length;
577 } 295 }
578 return total_length; 296 return 0;
579} 297}
580 298
581
582/** 299/**
583 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list 300 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
584 * @fsf_req: request to be processed 301 * @fsf_req: request to be processed
585 * @sbtype: SBALE flags 302 * @sbtype: SBALE flags
586 * @sg: scatter-gather list 303 * @sg: scatter-gather list
587 * @sg_count: number of elements in scatter-gather list
588 * @max_sbals: upper bound for number of SBALs to be used 304 * @max_sbals: upper bound for number of SBALs to be used
305 * Returns: number of bytes, or error (negativ)
589 */ 306 */
590int 307int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
591zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 308 struct scatterlist *sg, int max_sbals)
592 struct scatterlist *sgl, int sg_count, int max_sbals)
593{ 309{
594 int sg_index;
595 struct scatterlist *sg_segment;
596 int retval;
597 volatile struct qdio_buffer_element *sbale; 310 volatile struct qdio_buffer_element *sbale;
598 int bytes = 0; 311 int retval, bytes = 0;
599 312
600 /* figure out last allowed SBAL */ 313 /* figure out last allowed SBAL */
601 zfcp_qdio_sbal_limit(fsf_req, max_sbals); 314 zfcp_qdio_sbal_limit(fsf_req, max_sbals);
602 315
603 /* set storage-block type for current SBAL */ 316 /* set storage-block type for this request */
604 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 317 sbale = zfcp_qdio_sbale_req(fsf_req);
605 sbale->flags |= sbtype; 318 sbale->flags |= sbtype;
606 319
607 /* process all segements of scatter-gather list */ 320 for (; sg; sg = sg_next(sg)) {
608 for_each_sg(sgl, sg_segment, sg_count, sg_index) { 321 retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg),
609 retval = zfcp_qdio_sbals_from_segment( 322 sg->length);
610 fsf_req, 323 if (retval < 0)
611 sbtype, 324 return retval;
612 zfcp_sg_to_address(sg_segment), 325 bytes += sg->length;
613 sg_segment->length);
614 if (retval < 0) {
615 bytes = retval;
616 goto out;
617 } else
618 bytes += retval;
619 } 326 }
327
620 /* assume that no other SBALEs are to follow in the same SBAL */ 328 /* assume that no other SBALEs are to follow in the same SBAL */
621 sbale = zfcp_qdio_sbale_curr(fsf_req); 329 sbale = zfcp_qdio_sbale_curr(fsf_req);
622 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 330 sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
623out: 331
624 return bytes; 332 return bytes;
625} 333}
626 334
627
628/** 335/**
629 * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command 336 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
630 * @fsf_req: request to be processed 337 * @fsf_req: pointer to struct zfcp_fsf_req
631 * @sbtype: SBALE flags 338 * Returns: 0 on success, error otherwise
632 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used
633 * to fill SBALs
634 */ 339 */
635int 340int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
636zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
637 unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
638{ 341{
639 return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, scsi_sglist(scsi_cmnd), 342 struct zfcp_adapter *adapter = fsf_req->adapter;
640 scsi_sg_count(scsi_cmnd), 343 struct zfcp_qdio_queue *req_q = &adapter->req_q;
641 ZFCP_MAX_SBALS_PER_REQ); 344 int first = fsf_req->sbal_first;
345 int count = fsf_req->sbal_number;
346 int retval, pci, pci_batch;
347 volatile struct qdio_buffer_element *sbale;
348
349 /* acknowledgements for transferred buffers */
350 pci_batch = req_q->pci_batch + count;
351 if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
352 pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
353 pci = first + count - (pci_batch + 1);
354 pci %= QDIO_MAX_BUFFERS_PER_Q;
355 sbale = zfcp_qdio_sbale(req_q, pci, 0);
356 sbale->flags |= SBAL_FLAGS0_PCI;
357 }
358
359 retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first,
360 count);
361 if (unlikely(retval)) {
362 zfcp_qdio_zero_sbals(req_q->sbal, first, count);
363 return retval;
364 }
365
366 /* account for transferred buffers */
367 atomic_sub(count, &req_q->count);
368 req_q->first += count;
369 req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
370 req_q->pci_batch = pci_batch;
371 return 0;
642} 372}
643 373
644/** 374/**
645 * zfcp_qdio_determine_pci - set PCI flag in first SBALE on qdio queue if needed 375 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
376 * @adapter: pointer to struct zfcp_adapter
377 * Returns: -ENOMEM on memory allocation error or return value from
378 * qdio_allocate
646 */ 379 */
647int 380int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
648zfcp_qdio_determine_pci(struct zfcp_qdio_queue *req_queue,
649 struct zfcp_fsf_req *fsf_req)
650{ 381{
651 int new_distance_from_int; 382 struct qdio_initialize *init_data;
652 int pci_pos;
653 volatile struct qdio_buffer_element *sbale;
654 383
655 new_distance_from_int = req_queue->distance_from_int + 384 if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) ||
656 fsf_req->sbal_number; 385 zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal))
657 386 return -ENOMEM;
658 if (unlikely(new_distance_from_int >= ZFCP_QDIO_PCI_INTERVAL)) { 387
659 new_distance_from_int %= ZFCP_QDIO_PCI_INTERVAL; 388 init_data = &adapter->qdio_init_data;
660 pci_pos = fsf_req->sbal_first; 389
661 pci_pos += fsf_req->sbal_number; 390 init_data->cdev = adapter->ccw_device;
662 pci_pos -= new_distance_from_int; 391 init_data->q_format = QDIO_ZFCP_QFMT;
663 pci_pos -= 1; 392 memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8);
664 pci_pos %= QDIO_MAX_BUFFERS_PER_Q; 393 ASCEBC(init_data->adapter_name, 8);
665 sbale = zfcp_qdio_sbale_req(fsf_req, pci_pos, 0); 394 init_data->qib_param_field_format = 0;
666 sbale->flags |= SBAL_FLAGS0_PCI; 395 init_data->qib_param_field = NULL;
667 } 396 init_data->input_slib_elements = NULL;
668 return new_distance_from_int; 397 init_data->output_slib_elements = NULL;
398 init_data->no_input_qs = 1;
399 init_data->no_output_qs = 1;
400 init_data->input_handler = zfcp_qdio_int_resp;
401 init_data->output_handler = zfcp_qdio_int_req;
402 init_data->int_parm = (unsigned long) adapter;
403 init_data->flags = QDIO_INBOUND_0COPY_SBALS |
404 QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
405 init_data->input_sbal_addr_array =
406 (void **) (adapter->resp_q.sbal);
407 init_data->output_sbal_addr_array =
408 (void **) (adapter->req_q.sbal);
409
410 return qdio_allocate(init_data);
669} 411}
670 412
671/* 413/**
672 * function: zfcp_zero_sbals 414 * zfcp_close_qdio - close qdio queues for an adapter
673 *
674 * purpose: zeros specified range of SBALs
675 *
676 * returns:
677 */ 415 */
678void 416void zfcp_qdio_close(struct zfcp_adapter *adapter)
679zfcp_qdio_zero_sbals(struct qdio_buffer *buf[], int first, int clean_count)
680{ 417{
681 int cur_pos; 418 struct zfcp_qdio_queue *req_q;
682 int index; 419 int first, count;
683 420
684 for (cur_pos = first; cur_pos < (first + clean_count); cur_pos++) { 421 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
685 index = cur_pos % QDIO_MAX_BUFFERS_PER_Q; 422 return;
686 memset(buf[index], 0, sizeof (struct qdio_buffer)); 423
687 ZFCP_LOG_TRACE("zeroing BUFFER %d at address %p\n", 424 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
688 index, buf[index]); 425 req_q = &adapter->req_q;
426 spin_lock(&req_q->lock);
427 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
428 spin_unlock(&req_q->lock);
429
430 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
431
432 /* cleanup used outbound sbals */
433 count = atomic_read(&req_q->count);
434 if (count < QDIO_MAX_BUFFERS_PER_Q) {
435 first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
436 count = QDIO_MAX_BUFFERS_PER_Q - count;
437 zfcp_qdio_zero_sbals(req_q->sbal, first, count);
689 } 438 }
439 req_q->first = 0;
440 atomic_set(&req_q->count, 0);
441 req_q->pci_batch = 0;
442 adapter->resp_q.first = 0;
443 atomic_set(&adapter->resp_q.count, 0);
690} 444}
691 445
692#undef ZFCP_LOG_AREA 446/**
447 * zfcp_qdio_open - prepare and initialize response queue
448 * @adapter: pointer to struct zfcp_adapter
449 * Returns: 0 on success, otherwise -EIO
450 */
451int zfcp_qdio_open(struct zfcp_adapter *adapter)
452{
453 volatile struct qdio_buffer_element *sbale;
454 int cc;
455
456 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
457 return -EIO;
458
459 if (qdio_establish(&adapter->qdio_init_data)) {
460 dev_err(&adapter->ccw_device->dev,
461 "Establish of QDIO queues failed.\n");
462 return -EIO;
463 }
464
465 if (qdio_activate(adapter->ccw_device)) {
466 dev_err(&adapter->ccw_device->dev,
467 "Activate of QDIO queues failed.\n");
468 goto failed_qdio;
469 }
470
471 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
472 sbale = &(adapter->resp_q.sbal[cc]->element[0]);
473 sbale->length = 0;
474 sbale->flags = SBAL_FLAGS_LAST_ENTRY;
475 sbale->addr = NULL;
476 }
477
478 if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
479 QDIO_MAX_BUFFERS_PER_Q)) {
480 dev_err(&adapter->ccw_device->dev,
481 "Init of QDIO response queue failed.\n");
482 goto failed_qdio;
483 }
484
485 /* set index of first avalable SBALS / number of available SBALS */
486 adapter->req_q.first = 0;
487 atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
488 adapter->req_q.pci_batch = 0;
489
490 return 0;
491
492failed_qdio:
493 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
494 return -EIO;
495}
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 01687559dc06..aeae56b00b45 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -1,220 +1,65 @@
1/* 1/*
2 * This file is part of the zfcp device driver for 2 * zfcp device driver
3 * FCP adapters for IBM System z9 and zSeries.
4 * 3 *
5 * (C) Copyright IBM Corp. 2002, 2006 4 * Interface to Linux SCSI midlayer.
6 * 5 *
7 * This program is free software; you can redistribute it and/or modify 6 * Copyright IBM Corporation 2002, 2008
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 7 */
21 8
22#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
23
24#include "zfcp_ext.h" 9#include "zfcp_ext.h"
25#include <asm/atomic.h> 10#include <asm/atomic.h>
26 11
27static void zfcp_scsi_slave_destroy(struct scsi_device *sdp);
28static int zfcp_scsi_slave_alloc(struct scsi_device *sdp);
29static int zfcp_scsi_slave_configure(struct scsi_device *sdp);
30static int zfcp_scsi_queuecommand(struct scsi_cmnd *,
31 void (*done) (struct scsi_cmnd *));
32static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
33static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
34static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *);
35static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
36static int zfcp_task_management_function(struct zfcp_unit *, u8,
37 struct scsi_cmnd *);
38
39static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int,
40 unsigned int, unsigned int);
41
42static struct device_attribute *zfcp_sysfs_sdev_attrs[];
43static struct device_attribute *zfcp_a_stats_attrs[];
44
45struct zfcp_data zfcp_data = {
46 .scsi_host_template = {
47 .name = ZFCP_NAME,
48 .module = THIS_MODULE,
49 .proc_name = "zfcp",
50 .slave_alloc = zfcp_scsi_slave_alloc,
51 .slave_configure = zfcp_scsi_slave_configure,
52 .slave_destroy = zfcp_scsi_slave_destroy,
53 .queuecommand = zfcp_scsi_queuecommand,
54 .eh_abort_handler = zfcp_scsi_eh_abort_handler,
55 .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
56 .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
57 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
58 .can_queue = 4096,
59 .this_id = -1,
60 .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
61 .cmd_per_lun = 1,
62 .use_clustering = 1,
63 .sdev_attrs = zfcp_sysfs_sdev_attrs,
64 .max_sectors = ZFCP_MAX_SECTORS,
65 .shost_attrs = zfcp_a_stats_attrs,
66 },
67 .driver_version = ZFCP_VERSION,
68};
69
70/* Find start of Response Information in FCP response unit*/
71char *
72zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
73{
74 char *fcp_rsp_info_ptr;
75
76 fcp_rsp_info_ptr =
77 (unsigned char *) fcp_rsp_iu + (sizeof (struct fcp_rsp_iu));
78
79 return fcp_rsp_info_ptr;
80}
81
82/* Find start of Sense Information in FCP response unit*/ 12/* Find start of Sense Information in FCP response unit*/
83char * 13char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
84zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
85{ 14{
86 char *fcp_sns_info_ptr; 15 char *fcp_sns_info_ptr;
87 16
88 fcp_sns_info_ptr = 17 fcp_sns_info_ptr = (unsigned char *) &fcp_rsp_iu[1];
89 (unsigned char *) fcp_rsp_iu + (sizeof (struct fcp_rsp_iu));
90 if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid) 18 if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)
91 fcp_sns_info_ptr = (char *) fcp_sns_info_ptr + 19 fcp_sns_info_ptr += fcp_rsp_iu->fcp_rsp_len;
92 fcp_rsp_iu->fcp_rsp_len;
93 20
94 return fcp_sns_info_ptr; 21 return fcp_sns_info_ptr;
95} 22}
96 23
97static fcp_dl_t * 24void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
98zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd)
99{ 25{
100 int additional_length = fcp_cmd->add_fcp_cdb_length << 2; 26 fcp_dl_t *fcp_dl_ptr;
101 fcp_dl_t *fcp_dl_addr;
102 27
103 fcp_dl_addr = (fcp_dl_t *)
104 ((unsigned char *) fcp_cmd +
105 sizeof (struct fcp_cmnd_iu) + additional_length);
106 /* 28 /*
107 * fcp_dl_addr = start address of fcp_cmnd structure + 29 * fcp_dl_addr = start address of fcp_cmnd structure +
108 * size of fixed part + size of dynamically sized add_dcp_cdb field 30 * size of fixed part + size of dynamically sized add_dcp_cdb field
109 * SEE FCP-2 documentation 31 * SEE FCP-2 documentation
110 */ 32 */
111 return fcp_dl_addr; 33 fcp_dl_ptr = (fcp_dl_t *) ((unsigned char *) &fcp_cmd[1] +
34 (fcp_cmd->add_fcp_cdb_length << 2));
35 *fcp_dl_ptr = fcp_dl;
112} 36}
113 37
114fcp_dl_t
115zfcp_get_fcp_dl(struct fcp_cmnd_iu * fcp_cmd)
116{
117 return *zfcp_get_fcp_dl_ptr(fcp_cmd);
118}
119
120void
121zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
122{
123 *zfcp_get_fcp_dl_ptr(fcp_cmd) = fcp_dl;
124}
125
126/*
127 * note: it's a bit-or operation not an assignment
128 * regarding the specified byte
129 */
130static inline void
131set_byte(int *result, char status, char pos)
132{
133 *result |= status << (pos * 8);
134}
135
136void
137set_host_byte(int *result, char status)
138{
139 set_byte(result, status, 2);
140}
141
142void
143set_driver_byte(int *result, char status)
144{
145 set_byte(result, status, 3);
146}
147
148static int
149zfcp_scsi_slave_alloc(struct scsi_device *sdp)
150{
151 struct zfcp_adapter *adapter;
152 struct zfcp_unit *unit;
153 unsigned long flags;
154 int retval = -ENXIO;
155
156 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
157 if (!adapter)
158 goto out;
159
160 read_lock_irqsave(&zfcp_data.config_lock, flags);
161 unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun);
162 if (unit && atomic_test_mask(ZFCP_STATUS_UNIT_REGISTERED,
163 &unit->status)) {
164 sdp->hostdata = unit;
165 unit->device = sdp;
166 zfcp_unit_get(unit);
167 retval = 0;
168 }
169 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
170 out:
171 return retval;
172}
173
174/**
175 * zfcp_scsi_slave_destroy - called when scsi device is removed
176 *
177 * Remove reference to associated scsi device for an zfcp_unit.
178 * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs
179 * or a scan for this device might have failed.
180 */
181static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 38static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
182{ 39{
183 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 40 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
184 41 WARN_ON(!unit);
185 if (unit) { 42 if (unit) {
186 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); 43 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
187 sdpnt->hostdata = NULL; 44 sdpnt->hostdata = NULL;
188 unit->device = NULL; 45 unit->device = NULL;
189 zfcp_erp_unit_failed(unit, 12, NULL); 46 zfcp_erp_unit_failed(unit, 12, NULL);
190 zfcp_unit_put(unit); 47 zfcp_unit_put(unit);
191 } else 48 }
192 ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at "
193 "address %p\n", sdpnt);
194} 49}
195 50
196/* 51static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
197 * called from scsi midlayer to allow finetuning of a device.
198 */
199static int
200zfcp_scsi_slave_configure(struct scsi_device *sdp)
201{ 52{
202 if (sdp->tagged_supported) 53 if (sdp->tagged_supported)
203 scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, ZFCP_CMND_PER_LUN); 54 scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, 32);
204 else 55 else
205 scsi_adjust_queue_depth(sdp, 0, 1); 56 scsi_adjust_queue_depth(sdp, 0, 1);
206 return 0; 57 return 0;
207} 58}
208 59
209/** 60static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
210 * zfcp_scsi_command_fail - set result in scsi_cmnd and call scsi_done function
211 * @scpnt: pointer to struct scsi_cmnd where result is set
212 * @result: result to be set in scpnt (e.g. DID_ERROR)
213 */
214static void
215zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
216{ 61{
217 set_host_byte(&scpnt->result, result); 62 set_host_byte(scpnt, result);
218 if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) 63 if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
219 zfcp_scsi_dbf_event_result("fail", 4, 64 zfcp_scsi_dbf_event_result("fail", 4,
220 (struct zfcp_adapter*) scpnt->device->host->hostdata[0], 65 (struct zfcp_adapter*) scpnt->device->host->hostdata[0],
@@ -223,114 +68,13 @@ zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
223 scpnt->scsi_done(scpnt); 68 scpnt->scsi_done(scpnt);
224} 69}
225 70
226/** 71static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
227 * zfcp_scsi_command_async - worker for zfcp_scsi_queuecommand and 72 void (*done) (struct scsi_cmnd *))
228 * zfcp_scsi_command_sync
229 * @adapter: adapter where scsi command is issued
230 * @unit: unit to which scsi command is sent
231 * @scpnt: scsi command to be sent
232 * @timer: timer to be started if request is successfully initiated
233 *
234 * Note: In scsi_done function must be set in scpnt.
235 */
236int
237zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
238 struct scsi_cmnd *scpnt, int use_timer)
239{
240 int tmp;
241 int retval;
242
243 retval = 0;
244
245 BUG_ON((adapter == NULL) || (adapter != unit->port->adapter));
246 BUG_ON(scpnt->scsi_done == NULL);
247
248 if (unlikely(NULL == unit)) {
249 zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
250 goto out;
251 }
252
253 if (unlikely(
254 atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status) ||
255 !atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status))) {
256 ZFCP_LOG_DEBUG("stopping SCSI I/O on unit 0x%016Lx on port "
257 "0x%016Lx on adapter %s\n",
258 unit->fcp_lun, unit->port->wwpn,
259 zfcp_get_busid_by_adapter(adapter));
260 zfcp_scsi_command_fail(scpnt, DID_ERROR);
261 goto out;
262 }
263
264 tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, use_timer,
265 ZFCP_REQ_AUTO_CLEANUP);
266 if (unlikely(tmp == -EBUSY)) {
267 ZFCP_LOG_DEBUG("adapter %s not ready or unit 0x%016Lx "
268 "on port 0x%016Lx in recovery\n",
269 zfcp_get_busid_by_unit(unit),
270 unit->fcp_lun, unit->port->wwpn);
271 zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
272 goto out;
273 }
274
275 if (unlikely(tmp < 0)) {
276 ZFCP_LOG_DEBUG("error: initiation of Send FCP Cmnd failed\n");
277 retval = SCSI_MLQUEUE_HOST_BUSY;
278 }
279
280out:
281 return retval;
282}
283
284static void
285zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt)
286{
287 struct completion *wait = (struct completion *) scpnt->SCp.ptr;
288 complete(wait);
289}
290
291
292/**
293 * zfcp_scsi_command_sync - send a SCSI command and wait for completion
294 * @unit: unit where command is sent to
295 * @scpnt: scsi command to be sent
296 * @use_timer: indicates whether timer should be setup or not
297 * Return: 0
298 *
299 * Errors are indicated in scpnt->result
300 */
301int
302zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt,
303 int use_timer)
304{
305 int ret;
306 DECLARE_COMPLETION_ONSTACK(wait);
307
308 scpnt->SCp.ptr = (void *) &wait; /* silent re-use */
309 scpnt->scsi_done = zfcp_scsi_command_sync_handler;
310 ret = zfcp_scsi_command_async(unit->port->adapter, unit, scpnt,
311 use_timer);
312 if (ret == 0)
313 wait_for_completion(&wait);
314
315 scpnt->SCp.ptr = NULL;
316
317 return 0;
318}
319
320/*
321 * function: zfcp_scsi_queuecommand
322 *
323 * purpose: enqueues a SCSI command to the specified target device
324 *
325 * returns: 0 - success, SCSI command enqueued
326 * !0 - failure
327 */
328static int
329zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
330 void (*done) (struct scsi_cmnd *))
331{ 73{
332 struct zfcp_unit *unit; 74 struct zfcp_unit *unit;
333 struct zfcp_adapter *adapter; 75 struct zfcp_adapter *adapter;
76 int status;
77 int ret;
334 78
335 /* reset the status for this request */ 79 /* reset the status for this request */
336 scpnt->result = 0; 80 scpnt->result = 0;
@@ -342,44 +86,76 @@ zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
342 * (stored there by zfcp_scsi_slave_alloc) 86 * (stored there by zfcp_scsi_slave_alloc)
343 */ 87 */
344 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; 88 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
345 unit = (struct zfcp_unit *) scpnt->device->hostdata; 89 unit = scpnt->device->hostdata;
90
91 BUG_ON(!adapter || (adapter != unit->port->adapter));
92 BUG_ON(!scpnt->scsi_done);
346 93
347 return zfcp_scsi_command_async(adapter, unit, scpnt, 0); 94 if (unlikely(!unit)) {
95 zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
96 return 0;
97 }
98
99 status = atomic_read(&unit->status);
100 if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
101 !(status & ZFCP_STATUS_COMMON_RUNNING))) {
102 zfcp_scsi_command_fail(scpnt, DID_ERROR);
103 return 0;;
104 }
105
106 ret = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, 0,
107 ZFCP_REQ_AUTO_CLEANUP);
108 if (unlikely(ret == -EBUSY))
109 zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
110 else if (unlikely(ret < 0))
111 return SCSI_MLQUEUE_HOST_BUSY;
112
113 return ret;
348} 114}
349 115
350static struct zfcp_unit * 116static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
351zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id, 117 int channel, unsigned int id,
352 unsigned int lun) 118 unsigned int lun)
353{ 119{
354 struct zfcp_port *port; 120 struct zfcp_port *port;
355 struct zfcp_unit *unit, *retval = NULL; 121 struct zfcp_unit *unit;
356 122
357 list_for_each_entry(port, &adapter->port_list_head, list) { 123 list_for_each_entry(port, &adapter->port_list_head, list) {
358 if (!port->rport || (id != port->rport->scsi_target_id)) 124 if (!port->rport || (id != port->rport->scsi_target_id))
359 continue; 125 continue;
360 list_for_each_entry(unit, &port->unit_list_head, list) 126 list_for_each_entry(unit, &port->unit_list_head, list)
361 if (lun == unit->scsi_lun) { 127 if (lun == unit->scsi_lun)
362 retval = unit; 128 return unit;
363 goto out;
364 }
365 } 129 }
366 out: 130
131 return NULL;
132}
133
134static int zfcp_scsi_slave_alloc(struct scsi_device *sdp)
135{
136 struct zfcp_adapter *adapter;
137 struct zfcp_unit *unit;
138 unsigned long flags;
139 int retval = -ENXIO;
140
141 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
142 if (!adapter)
143 goto out;
144
145 read_lock_irqsave(&zfcp_data.config_lock, flags);
146 unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun);
147 if (unit &&
148 (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_REGISTERED)) {
149 sdp->hostdata = unit;
150 unit->device = sdp;
151 zfcp_unit_get(unit);
152 retval = 0;
153 }
154 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
155out:
367 return retval; 156 return retval;
368} 157}
369 158
370/**
371 * zfcp_scsi_eh_abort_handler - abort the specified SCSI command
372 * @scpnt: pointer to scsi_cmnd to be aborted
373 * Return: SUCCESS - command has been aborted and cleaned up in internal
374 * bookkeeping, SCSI stack won't be called for aborted command
375 * FAILED - otherwise
376 *
377 * We do not need to care for a SCSI command which completes normally
378 * but late during this abort routine runs. We are allowed to return
379 * late commands to the SCSI stack. It tracks the state of commands and
380 * will handle late commands. (Usually, the normal completion of late
381 * commands is ignored with respect to the running abort operation.)
382 */
383static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 159static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
384{ 160{
385 struct Scsi_Host *scsi_host; 161 struct Scsi_Host *scsi_host;
@@ -387,44 +163,37 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
387 struct zfcp_unit *unit; 163 struct zfcp_unit *unit;
388 struct zfcp_fsf_req *fsf_req; 164 struct zfcp_fsf_req *fsf_req;
389 unsigned long flags; 165 unsigned long flags;
390 unsigned long old_req_id; 166 unsigned long old_req_id = (unsigned long) scpnt->host_scribble;
391 int retval = SUCCESS; 167 int retval = SUCCESS;
392 168
393 scsi_host = scpnt->device->host; 169 scsi_host = scpnt->device->host;
394 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; 170 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
395 unit = (struct zfcp_unit *) scpnt->device->hostdata; 171 unit = scpnt->device->hostdata;
396
397 ZFCP_LOG_INFO("aborting scsi_cmnd=%p on adapter %s\n",
398 scpnt, zfcp_get_busid_by_adapter(adapter));
399 172
400 /* avoid race condition between late normal completion and abort */ 173 /* avoid race condition between late normal completion and abort */
401 write_lock_irqsave(&adapter->abort_lock, flags); 174 write_lock_irqsave(&adapter->abort_lock, flags);
402 175
403 /* Check whether corresponding fsf_req is still pending */ 176 /* Check whether corresponding fsf_req is still pending */
404 spin_lock(&adapter->req_list_lock); 177 spin_lock(&adapter->req_list_lock);
405 fsf_req = zfcp_reqlist_find(adapter, 178 fsf_req = zfcp_reqlist_find(adapter, old_req_id);
406 (unsigned long) scpnt->host_scribble);
407 spin_unlock(&adapter->req_list_lock); 179 spin_unlock(&adapter->req_list_lock);
408 if (!fsf_req) { 180 if (!fsf_req) {
409 write_unlock_irqrestore(&adapter->abort_lock, flags); 181 write_unlock_irqrestore(&adapter->abort_lock, flags);
410 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, 0); 182 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, 0);
411 retval = SUCCESS; 183 return retval;
412 goto out;
413 } 184 }
414 fsf_req->data = 0; 185 fsf_req->data = NULL;
415 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING; 186 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
416 old_req_id = fsf_req->req_id;
417 187
418 /* don't access old fsf_req after releasing the abort_lock */ 188 /* don't access old fsf_req after releasing the abort_lock */
419 write_unlock_irqrestore(&adapter->abort_lock, flags); 189 write_unlock_irqrestore(&adapter->abort_lock, flags);
420 190
421 fsf_req = zfcp_fsf_abort_fcp_command(old_req_id, adapter, unit, 0); 191 fsf_req = zfcp_fsf_abort_fcp_command(old_req_id, adapter, unit, 0);
422 if (!fsf_req) { 192 if (!fsf_req) {
423 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
424 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, 193 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
425 old_req_id); 194 old_req_id);
426 retval = FAILED; 195 retval = FAILED;
427 goto out; 196 return retval;
428 } 197 }
429 198
430 __wait_event(fsf_req->completion_wq, 199 __wait_event(fsf_req->completion_wq,
@@ -432,66 +201,29 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
432 201
433 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { 202 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
434 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, fsf_req, 0); 203 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, fsf_req, 0);
435 retval = SUCCESS;
436 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { 204 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
437 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, fsf_req, 0); 205 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, fsf_req, 0);
438 retval = SUCCESS;
439 } else { 206 } else {
440 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, fsf_req, 0); 207 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, fsf_req, 0);
441 retval = FAILED; 208 retval = FAILED;
442 } 209 }
443 zfcp_fsf_req_free(fsf_req); 210 zfcp_fsf_req_free(fsf_req);
444 out:
445 return retval;
446}
447
448static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
449{
450 int retval;
451 struct zfcp_unit *unit = scpnt->device->hostdata;
452 211
453 if (!unit) { 212 return retval;
454 WARN_ON(1);
455 return SUCCESS;
456 }
457 retval = zfcp_task_management_function(unit,
458 FCP_LOGICAL_UNIT_RESET,
459 scpnt);
460 return retval ? FAILED : SUCCESS;
461}
462
463static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
464{
465 int retval;
466 struct zfcp_unit *unit = scpnt->device->hostdata;
467
468 if (!unit) {
469 WARN_ON(1);
470 return SUCCESS;
471 }
472 retval = zfcp_task_management_function(unit, FCP_TARGET_RESET, scpnt);
473 return retval ? FAILED : SUCCESS;
474} 213}
475 214
476static int 215static int zfcp_task_mgmt_function(struct zfcp_unit *unit, u8 tm_flags,
477zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags, 216 struct scsi_cmnd *scpnt)
478 struct scsi_cmnd *scpnt)
479{ 217{
480 struct zfcp_adapter *adapter = unit->port->adapter; 218 struct zfcp_adapter *adapter = unit->port->adapter;
481 struct zfcp_fsf_req *fsf_req; 219 struct zfcp_fsf_req *fsf_req;
482 int retval = 0; 220 int retval = SUCCESS;
483 221
484 /* issue task management function */ 222 /* issue task management function */
485 fsf_req = zfcp_fsf_send_fcp_command_task_management 223 fsf_req = zfcp_fsf_send_fcp_ctm(adapter, unit, tm_flags, 0);
486 (adapter, unit, tm_flags, 0);
487 if (!fsf_req) { 224 if (!fsf_req) {
488 ZFCP_LOG_INFO("error: creation of task management request "
489 "failed for unit 0x%016Lx on port 0x%016Lx on "
490 "adapter %s\n", unit->fcp_lun, unit->port->wwpn,
491 zfcp_get_busid_by_adapter(adapter));
492 zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, scpnt); 225 zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, scpnt);
493 retval = -ENOMEM; 226 return FAILED;
494 goto out;
495 } 227 }
496 228
497 __wait_event(fsf_req->completion_wq, 229 __wait_event(fsf_req->completion_wq,
@@ -502,87 +234,90 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
502 */ 234 */
503 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { 235 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
504 zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt); 236 zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt);
505 retval = -EIO; 237 retval = FAILED;
506 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) { 238 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
507 zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt); 239 zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt);
508 retval = -ENOTSUPP; 240 retval = FAILED;
509 } else 241 } else
510 zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt); 242 zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt);
511 243
512 zfcp_fsf_req_free(fsf_req); 244 zfcp_fsf_req_free(fsf_req);
513 out: 245
514 return retval; 246 return retval;
515} 247}
516 248
517/** 249static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
518 * zfcp_scsi_eh_host_reset_handler - handler for host reset 250{
519 */ 251 struct zfcp_unit *unit = scpnt->device->hostdata;
252
253 if (!unit) {
254 WARN_ON(1);
255 return SUCCESS;
256 }
257 return zfcp_task_mgmt_function(unit, FCP_LOGICAL_UNIT_RESET, scpnt);
258}
259
260static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
261{
262 struct zfcp_unit *unit = scpnt->device->hostdata;
263
264 if (!unit) {
265 WARN_ON(1);
266 return SUCCESS;
267 }
268 return zfcp_task_mgmt_function(unit, FCP_TARGET_RESET, scpnt);
269}
270
520static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 271static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
521{ 272{
522 struct zfcp_unit *unit; 273 struct zfcp_unit *unit;
523 struct zfcp_adapter *adapter; 274 struct zfcp_adapter *adapter;
524 275
525 unit = (struct zfcp_unit*) scpnt->device->hostdata; 276 unit = scpnt->device->hostdata;
526 adapter = unit->port->adapter; 277 adapter = unit->port->adapter;
527
528 ZFCP_LOG_NORMAL("host reset because of problems with "
529 "unit 0x%016Lx on port 0x%016Lx, adapter %s\n",
530 unit->fcp_lun, unit->port->wwpn,
531 zfcp_get_busid_by_adapter(unit->port->adapter));
532
533 zfcp_erp_adapter_reopen(adapter, 0, 141, scpnt); 278 zfcp_erp_adapter_reopen(adapter, 0, 141, scpnt);
534 zfcp_erp_wait(adapter); 279 zfcp_erp_wait(adapter);
535 280
536 return SUCCESS; 281 return SUCCESS;
537} 282}
538 283
539int 284int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
540zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
541{ 285{
542 int retval = 0; 286 struct ccw_dev_id dev_id;
543 static unsigned int unique_id = 0;
544 287
545 if (adapter->scsi_host) 288 if (adapter->scsi_host)
546 goto out; 289 return 0;
547 290
291 ccw_device_get_id(adapter->ccw_device, &dev_id);
548 /* register adapter as SCSI host with mid layer of SCSI stack */ 292 /* register adapter as SCSI host with mid layer of SCSI stack */
549 adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template, 293 adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
550 sizeof (struct zfcp_adapter *)); 294 sizeof (struct zfcp_adapter *));
551 if (!adapter->scsi_host) { 295 if (!adapter->scsi_host) {
552 ZFCP_LOG_NORMAL("error: registration with SCSI stack failed " 296 dev_err(&adapter->ccw_device->dev,
553 "for adapter %s ", 297 "registration with SCSI stack failed.");
554 zfcp_get_busid_by_adapter(adapter)); 298 return -EIO;
555 retval = -EIO;
556 goto out;
557 } 299 }
558 ZFCP_LOG_DEBUG("host registered, scsi_host=%p\n", adapter->scsi_host);
559 300
560 /* tell the SCSI stack some characteristics of this adapter */ 301 /* tell the SCSI stack some characteristics of this adapter */
561 adapter->scsi_host->max_id = 1; 302 adapter->scsi_host->max_id = 1;
562 adapter->scsi_host->max_lun = 1; 303 adapter->scsi_host->max_lun = 1;
563 adapter->scsi_host->max_channel = 0; 304 adapter->scsi_host->max_channel = 0;
564 adapter->scsi_host->unique_id = unique_id++; /* FIXME */ 305 adapter->scsi_host->unique_id = dev_id.devno;
565 adapter->scsi_host->max_cmd_len = ZFCP_MAX_SCSI_CMND_LENGTH; 306 adapter->scsi_host->max_cmd_len = 255;
566 adapter->scsi_host->transportt = zfcp_data.scsi_transport_template; 307 adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
567 308
568 /*
569 * save a pointer to our own adapter data structure within
570 * hostdata field of SCSI host data structure
571 */
572 adapter->scsi_host->hostdata[0] = (unsigned long) adapter; 309 adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
573 310
574 if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) { 311 if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
575 scsi_host_put(adapter->scsi_host); 312 scsi_host_put(adapter->scsi_host);
576 retval = -EIO; 313 return -EIO;
577 goto out;
578 } 314 }
579 atomic_set_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status); 315 atomic_set_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
580 out: 316
581 return retval; 317 return 0;
582} 318}
583 319
584void 320void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
585zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
586{ 321{
587 struct Scsi_Host *shost; 322 struct Scsi_Host *shost;
588 struct zfcp_port *port; 323 struct zfcp_port *port;
@@ -590,10 +325,12 @@ zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
590 shost = adapter->scsi_host; 325 shost = adapter->scsi_host;
591 if (!shost) 326 if (!shost)
592 return; 327 return;
328
593 read_lock_irq(&zfcp_data.config_lock); 329 read_lock_irq(&zfcp_data.config_lock);
594 list_for_each_entry(port, &adapter->port_list_head, list) 330 list_for_each_entry(port, &adapter->port_list_head, list)
595 if (port->rport) 331 if (port->rport)
596 port->rport = NULL; 332 port->rport = NULL;
333
597 read_unlock_irq(&zfcp_data.config_lock); 334 read_unlock_irq(&zfcp_data.config_lock);
598 fc_remove_host(shost); 335 fc_remove_host(shost);
599 scsi_remove_host(shost); 336 scsi_remove_host(shost);
@@ -604,9 +341,6 @@ zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
604 return; 341 return;
605} 342}
606 343
607/*
608 * Support functions for FC transport class
609 */
610static struct fc_host_statistics* 344static struct fc_host_statistics*
611zfcp_init_fc_host_stats(struct zfcp_adapter *adapter) 345zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
612{ 346{
@@ -622,13 +356,12 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
622 return adapter->fc_stats; 356 return adapter->fc_stats;
623} 357}
624 358
625static void 359static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
626zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats, 360 struct fsf_qtcb_bottom_port *data,
627 struct fsf_qtcb_bottom_port *data, 361 struct fsf_qtcb_bottom_port *old)
628 struct fsf_qtcb_bottom_port *old)
629{ 362{
630 fc_stats->seconds_since_last_reset = data->seconds_since_last_reset - 363 fc_stats->seconds_since_last_reset =
631 old->seconds_since_last_reset; 364 data->seconds_since_last_reset - old->seconds_since_last_reset;
632 fc_stats->tx_frames = data->tx_frames - old->tx_frames; 365 fc_stats->tx_frames = data->tx_frames - old->tx_frames;
633 fc_stats->tx_words = data->tx_words - old->tx_words; 366 fc_stats->tx_words = data->tx_words - old->tx_words;
634 fc_stats->rx_frames = data->rx_frames - old->rx_frames; 367 fc_stats->rx_frames = data->rx_frames - old->rx_frames;
@@ -639,26 +372,25 @@ zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
639 fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames; 372 fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
640 fc_stats->link_failure_count = data->link_failure - old->link_failure; 373 fc_stats->link_failure_count = data->link_failure - old->link_failure;
641 fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync; 374 fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
642 fc_stats->loss_of_signal_count = data->loss_of_signal - 375 fc_stats->loss_of_signal_count =
643 old->loss_of_signal; 376 data->loss_of_signal - old->loss_of_signal;
644 fc_stats->prim_seq_protocol_err_count = data->psp_error_counts - 377 fc_stats->prim_seq_protocol_err_count =
645 old->psp_error_counts; 378 data->psp_error_counts - old->psp_error_counts;
646 fc_stats->invalid_tx_word_count = data->invalid_tx_words - 379 fc_stats->invalid_tx_word_count =
647 old->invalid_tx_words; 380 data->invalid_tx_words - old->invalid_tx_words;
648 fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs; 381 fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
649 fc_stats->fcp_input_requests = data->input_requests - 382 fc_stats->fcp_input_requests =
650 old->input_requests; 383 data->input_requests - old->input_requests;
651 fc_stats->fcp_output_requests = data->output_requests - 384 fc_stats->fcp_output_requests =
652 old->output_requests; 385 data->output_requests - old->output_requests;
653 fc_stats->fcp_control_requests = data->control_requests - 386 fc_stats->fcp_control_requests =
654 old->control_requests; 387 data->control_requests - old->control_requests;
655 fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb; 388 fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
656 fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb; 389 fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
657} 390}
658 391
659static void 392static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
660zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats, 393 struct fsf_qtcb_bottom_port *data)
661 struct fsf_qtcb_bottom_port *data)
662{ 394{
663 fc_stats->seconds_since_last_reset = data->seconds_since_last_reset; 395 fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
664 fc_stats->tx_frames = data->tx_frames; 396 fc_stats->tx_frames = data->tx_frames;
@@ -682,22 +414,14 @@ zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
682 fc_stats->fcp_output_megabytes = data->output_mb; 414 fc_stats->fcp_output_megabytes = data->output_mb;
683} 415}
684 416
685/** 417static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
686 * zfcp_get_fc_host_stats - provide fc_host_statistics for scsi_transport_fc
687 *
688 * assumption: scsi_transport_fc synchronizes calls of
689 * get_fc_host_stats and reset_fc_host_stats
690 * (XXX to be checked otherwise introduce locking)
691 */
692static struct fc_host_statistics *
693zfcp_get_fc_host_stats(struct Scsi_Host *shost)
694{ 418{
695 struct zfcp_adapter *adapter; 419 struct zfcp_adapter *adapter;
696 struct fc_host_statistics *fc_stats; 420 struct fc_host_statistics *fc_stats;
697 struct fsf_qtcb_bottom_port *data; 421 struct fsf_qtcb_bottom_port *data;
698 int ret; 422 int ret;
699 423
700 adapter = (struct zfcp_adapter *)shost->hostdata[0]; 424 adapter = (struct zfcp_adapter *)host->hostdata[0];
701 fc_stats = zfcp_init_fc_host_stats(adapter); 425 fc_stats = zfcp_init_fc_host_stats(adapter);
702 if (!fc_stats) 426 if (!fc_stats)
703 return NULL; 427 return NULL;
@@ -709,26 +433,25 @@ zfcp_get_fc_host_stats(struct Scsi_Host *shost)
709 ret = zfcp_fsf_exchange_port_data_sync(adapter, data); 433 ret = zfcp_fsf_exchange_port_data_sync(adapter, data);
710 if (ret) { 434 if (ret) {
711 kfree(data); 435 kfree(data);
712 return NULL; /* XXX return zeroed fc_stats? */ 436 return NULL;
713 } 437 }
714 438
715 if (adapter->stats_reset && 439 if (adapter->stats_reset &&
716 ((jiffies/HZ - adapter->stats_reset) < 440 ((jiffies/HZ - adapter->stats_reset) <
717 data->seconds_since_last_reset)) { 441 data->seconds_since_last_reset))
718 zfcp_adjust_fc_host_stats(fc_stats, data, 442 zfcp_adjust_fc_host_stats(fc_stats, data,
719 adapter->stats_reset_data); 443 adapter->stats_reset_data);
720 } else 444 else
721 zfcp_set_fc_host_stats(fc_stats, data); 445 zfcp_set_fc_host_stats(fc_stats, data);
722 446
723 kfree(data); 447 kfree(data);
724 return fc_stats; 448 return fc_stats;
725} 449}
726 450
727static void 451static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
728zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
729{ 452{
730 struct zfcp_adapter *adapter; 453 struct zfcp_adapter *adapter;
731 struct fsf_qtcb_bottom_port *data, *old_data; 454 struct fsf_qtcb_bottom_port *data;
732 int ret; 455 int ret;
733 456
734 adapter = (struct zfcp_adapter *)shost->hostdata[0]; 457 adapter = (struct zfcp_adapter *)shost->hostdata[0];
@@ -737,17 +460,33 @@ zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
737 return; 460 return;
738 461
739 ret = zfcp_fsf_exchange_port_data_sync(adapter, data); 462 ret = zfcp_fsf_exchange_port_data_sync(adapter, data);
740 if (ret) { 463 if (ret)
741 kfree(data); 464 kfree(data);
742 } else { 465 else {
743 adapter->stats_reset = jiffies/HZ; 466 adapter->stats_reset = jiffies/HZ;
744 old_data = adapter->stats_reset_data; 467 kfree(adapter->stats_reset_data);
745 adapter->stats_reset_data = data; /* finally freed in 468 adapter->stats_reset_data = data; /* finally freed in
746 adater_dequeue */ 469 adapter_dequeue */
747 kfree(old_data);
748 } 470 }
749} 471}
750 472
473static void zfcp_get_host_port_state(struct Scsi_Host *shost)
474{
475 struct zfcp_adapter *adapter =
476 (struct zfcp_adapter *)shost->hostdata[0];
477 int status = atomic_read(&adapter->status);
478
479 if ((status & ZFCP_STATUS_COMMON_RUNNING) &&
480 !(status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED))
481 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
482 else if (status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
483 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
484 else if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
485 fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
486 else
487 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
488}
489
751static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) 490static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
752{ 491{
753 rport->dev_loss_tmo = timeout; 492 rport->dev_loss_tmo = timeout;
@@ -770,6 +509,8 @@ struct fc_function_template zfcp_transport_functions = {
770 .get_fc_host_stats = zfcp_get_fc_host_stats, 509 .get_fc_host_stats = zfcp_get_fc_host_stats,
771 .reset_fc_host_stats = zfcp_reset_fc_host_stats, 510 .reset_fc_host_stats = zfcp_reset_fc_host_stats,
772 .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo, 511 .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
512 .get_host_port_state = zfcp_get_host_port_state,
513 .show_host_port_state = 1,
773 /* no functions registered for following dynamic attributes but 514 /* no functions registered for following dynamic attributes but
774 directly set by LLDD */ 515 directly set by LLDD */
775 .show_host_port_type = 1, 516 .show_host_port_type = 1,
@@ -778,149 +519,26 @@ struct fc_function_template zfcp_transport_functions = {
778 .disable_target_scan = 1, 519 .disable_target_scan = 1,
779}; 520};
780 521
781/** 522struct zfcp_data zfcp_data = {
782 * ZFCP_DEFINE_SCSI_ATTR 523 .scsi_host_template = {
783 * @_name: name of show attribute 524 .name = "zfcp",
784 * @_format: format string 525 .module = THIS_MODULE,
785 * @_value: value to print 526 .proc_name = "zfcp",
786 * 527 .slave_alloc = zfcp_scsi_slave_alloc,
787 * Generates attribute for a unit. 528 .slave_configure = zfcp_scsi_slave_configure,
788 */ 529 .slave_destroy = zfcp_scsi_slave_destroy,
789#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value) \ 530 .queuecommand = zfcp_scsi_queuecommand,
790static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, struct device_attribute *attr, \ 531 .eh_abort_handler = zfcp_scsi_eh_abort_handler,
791 char *buf) \ 532 .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
792{ \ 533 .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
793 struct scsi_device *sdev; \ 534 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
794 struct zfcp_unit *unit; \ 535 .can_queue = 4096,
795 \ 536 .this_id = -1,
796 sdev = to_scsi_device(dev); \ 537 .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
797 unit = sdev->hostdata; \ 538 .cmd_per_lun = 1,
798 return sprintf(buf, _format, _value); \ 539 .use_clustering = 1,
799} \ 540 .sdev_attrs = zfcp_sysfs_sdev_attrs,
800 \ 541 .max_sectors = (ZFCP_MAX_SBALES_PER_REQ * 8),
801static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL); 542 .shost_attrs = zfcp_sysfs_shost_attrs,
802 543 },
803ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", zfcp_get_busid_by_unit(unit));
804ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn);
805ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun);
806
807static struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
808 &dev_attr_fcp_lun,
809 &dev_attr_wwpn,
810 &dev_attr_hba_id,
811 NULL
812};
813
814static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
815 struct device_attribute *attr,
816 char *buf)
817{
818 struct Scsi_Host *scsi_host = dev_to_shost(dev);
819 struct fsf_qtcb_bottom_port *qtcb_port;
820 int retval;
821 struct zfcp_adapter *adapter;
822
823 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
824 if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
825 return -EOPNOTSUPP;
826
827 qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
828 if (!qtcb_port)
829 return -ENOMEM;
830
831 retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
832 if (!retval)
833 retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
834 qtcb_port->cb_util, qtcb_port->a_util);
835 kfree(qtcb_port);
836 return retval;
837}
838
839static int zfcp_sysfs_adapter_ex_config(struct device *dev,
840 struct fsf_statistics_info *stat_inf)
841{
842 int retval;
843 struct fsf_qtcb_bottom_config *qtcb_config;
844 struct Scsi_Host *scsi_host = dev_to_shost(dev);
845 struct zfcp_adapter *adapter;
846
847 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
848 if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
849 return -EOPNOTSUPP;
850
851 qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
852 GFP_KERNEL);
853 if (!qtcb_config)
854 return -ENOMEM;
855
856 retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
857 if (!retval)
858 *stat_inf = qtcb_config->stat_info;
859
860 kfree(qtcb_config);
861 return retval;
862}
863
864static ssize_t zfcp_sysfs_adapter_request_show(struct device *dev,
865 struct device_attribute *attr,
866 char *buf)
867{
868 struct fsf_statistics_info stat_info;
869 int retval;
870
871 retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
872 if (retval)
873 return retval;
874
875 return sprintf(buf, "%llu %llu %llu\n",
876 (unsigned long long) stat_info.input_req,
877 (unsigned long long) stat_info.output_req,
878 (unsigned long long) stat_info.control_req);
879}
880
881static ssize_t zfcp_sysfs_adapter_mb_show(struct device *dev,
882 struct device_attribute *attr,
883 char *buf)
884{
885 struct fsf_statistics_info stat_info;
886 int retval;
887
888 retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
889 if (retval)
890 return retval;
891
892 return sprintf(buf, "%llu %llu\n",
893 (unsigned long long) stat_info.input_mb,
894 (unsigned long long) stat_info.output_mb);
895}
896
897static ssize_t zfcp_sysfs_adapter_sec_active_show(struct device *dev,
898 struct device_attribute *attr,
899 char *buf)
900{
901 struct fsf_statistics_info stat_info;
902 int retval;
903
904 retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
905 if (retval)
906 return retval;
907
908 return sprintf(buf, "%llu\n",
909 (unsigned long long) stat_info.seconds_act);
910}
911
912static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
913static DEVICE_ATTR(requests, S_IRUGO, zfcp_sysfs_adapter_request_show, NULL);
914static DEVICE_ATTR(megabytes, S_IRUGO, zfcp_sysfs_adapter_mb_show, NULL);
915static DEVICE_ATTR(seconds_active, S_IRUGO,
916 zfcp_sysfs_adapter_sec_active_show, NULL);
917
918static struct device_attribute *zfcp_a_stats_attrs[] = {
919 &dev_attr_utilization,
920 &dev_attr_requests,
921 &dev_attr_megabytes,
922 &dev_attr_seconds_active,
923 NULL
924}; 544};
925
926#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
new file mode 100644
index 000000000000..2e85c6c49e7d
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -0,0 +1,496 @@
1/*
2 * zfcp device driver
3 *
4 * sysfs attributes.
5 *
6 * Copyright IBM Corporation 2008
7 */
8
9#include "zfcp_ext.h"
10
11#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
12struct device_attribute dev_attr_##_feat##_##_name = __ATTR(_name, _mode,\
13 _show, _store)
14#define ZFCP_DEFINE_ATTR(_feat_def, _feat, _name, _format, _value) \
15static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
16 struct device_attribute *at,\
17 char *buf) \
18{ \
19 struct _feat_def *_feat = dev_get_drvdata(dev); \
20 \
21 return sprintf(buf, _format, _value); \
22} \
23static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
24 zfcp_sysfs_##_feat##_##_name##_show, NULL);
25
26ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n",
27 atomic_read(&adapter->status));
28ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n",
29 adapter->peer_wwnn);
30ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n",
31 adapter->peer_wwpn);
32ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n",
33 adapter->peer_d_id);
34ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n",
35 adapter->hydra_version);
36ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, lic_version, "0x%08x\n",
37 adapter->fsf_lic_version);
38ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, hardware_version, "0x%08x\n",
39 adapter->hardware_version);
40ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, in_recovery, "%d\n",
41 (atomic_read(&adapter->status) &
42 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
43
44ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
45 atomic_read(&port->status));
46ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n",
47 (atomic_read(&port->status) &
48 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
49ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
50 (atomic_read(&port->status) &
51 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
52
53ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
54 atomic_read(&unit->status));
55ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
56 (atomic_read(&unit->status) &
57 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
58ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
59 (atomic_read(&unit->status) &
60 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
61ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
62 (atomic_read(&unit->status) &
63 ZFCP_STATUS_UNIT_SHARED) != 0);
64ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
65 (atomic_read(&unit->status) &
66 ZFCP_STATUS_UNIT_READONLY) != 0);
67
68#define ZFCP_SYSFS_FAILED(_feat_def, _feat, _adapter, _mod_id, _reopen_id) \
69static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \
70 struct device_attribute *attr, \
71 char *buf) \
72{ \
73 struct _feat_def *_feat = dev_get_drvdata(dev); \
74 \
75 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \
76 return sprintf(buf, "1\n"); \
77 else \
78 return sprintf(buf, "0\n"); \
79} \
80static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
81 struct device_attribute *attr,\
82 const char *buf, size_t count)\
83{ \
84 struct _feat_def *_feat = dev_get_drvdata(dev); \
85 unsigned long val; \
86 int retval = 0; \
87 \
88 down(&zfcp_data.config_sema); \
89 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \
90 retval = -EBUSY; \
91 goto out; \
92 } \
93 \
94 if (strict_strtoul(buf, 0, &val) || val != 0) { \
95 retval = -EINVAL; \
96 goto out; \
97 } \
98 \
99 zfcp_erp_modify_##_feat##_status(_feat, _mod_id, NULL, \
100 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);\
101 zfcp_erp_##_feat##_reopen(_feat, ZFCP_STATUS_COMMON_ERP_FAILED, \
102 _reopen_id, NULL); \
103 zfcp_erp_wait(_adapter); \
104out: \
105 up(&zfcp_data.config_sema); \
106 return retval ? retval : (ssize_t) count; \
107} \
108static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
109 zfcp_sysfs_##_feat##_failed_show, \
110 zfcp_sysfs_##_feat##_failed_store);
111
112ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, 44, 93);
113ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, 45, 96);
114ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, 46, 97);
115
116static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
117 struct device_attribute *attr,
118 const char *buf, size_t count)
119{
120 struct zfcp_adapter *adapter = dev_get_drvdata(dev);
121 int ret;
122
123 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE)
124 return -EBUSY;
125
126 ret = zfcp_scan_ports(adapter);
127 return ret ? ret : (ssize_t) count;
128}
129static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
130 zfcp_sysfs_port_rescan_store);
131
132static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
133 struct device_attribute *attr,
134 const char *buf, size_t count)
135{
136 struct zfcp_adapter *adapter = dev_get_drvdata(dev);
137 struct zfcp_port *port;
138 wwn_t wwpn;
139 int retval = 0;
140
141 down(&zfcp_data.config_sema);
142 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
143 retval = -EBUSY;
144 goto out;
145 }
146
147 if (strict_strtoull(buf, 0, &wwpn)) {
148 retval = -EINVAL;
149 goto out;
150 }
151
152 write_lock_irq(&zfcp_data.config_lock);
153 port = zfcp_get_port_by_wwpn(adapter, wwpn);
154 if (port && (atomic_read(&port->refcount) == 0)) {
155 zfcp_port_get(port);
156 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
157 list_move(&port->list, &adapter->port_remove_lh);
158 } else
159 port = NULL;
160 write_unlock_irq(&zfcp_data.config_lock);
161
162 if (!port) {
163 retval = -ENXIO;
164 goto out;
165 }
166
167 zfcp_erp_port_shutdown(port, 0, 92, NULL);
168 zfcp_erp_wait(adapter);
169 zfcp_port_put(port);
170 zfcp_port_dequeue(port);
171 out:
172 up(&zfcp_data.config_sema);
173 return retval ? retval : (ssize_t) count;
174}
175static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
176 zfcp_sysfs_port_remove_store);
177
178static struct attribute *zfcp_adapter_attrs[] = {
179 &dev_attr_adapter_failed.attr,
180 &dev_attr_adapter_in_recovery.attr,
181 &dev_attr_adapter_port_remove.attr,
182 &dev_attr_adapter_port_rescan.attr,
183 &dev_attr_adapter_peer_wwnn.attr,
184 &dev_attr_adapter_peer_wwpn.attr,
185 &dev_attr_adapter_peer_d_id.attr,
186 &dev_attr_adapter_card_version.attr,
187 &dev_attr_adapter_lic_version.attr,
188 &dev_attr_adapter_status.attr,
189 &dev_attr_adapter_hardware_version.attr,
190 NULL
191};
192
193struct attribute_group zfcp_sysfs_adapter_attrs = {
194 .attrs = zfcp_adapter_attrs,
195};
196
197static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
198 struct device_attribute *attr,
199 const char *buf, size_t count)
200{
201 struct zfcp_port *port = dev_get_drvdata(dev);
202 struct zfcp_unit *unit;
203 fcp_lun_t fcp_lun;
204 int retval = -EINVAL;
205
206 down(&zfcp_data.config_sema);
207 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
208 retval = -EBUSY;
209 goto out;
210 }
211
212 if (strict_strtoull(buf, 0, &fcp_lun))
213 goto out;
214
215 unit = zfcp_unit_enqueue(port, fcp_lun);
216 if (IS_ERR(unit))
217 goto out;
218
219 retval = 0;
220
221 zfcp_erp_unit_reopen(unit, 0, 94, NULL);
222 zfcp_erp_wait(unit->port->adapter);
223 zfcp_unit_put(unit);
224out:
225 up(&zfcp_data.config_sema);
226 return retval ? retval : (ssize_t) count;
227}
228static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
229
230static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
231 struct device_attribute *attr,
232 const char *buf, size_t count)
233{
234 struct zfcp_port *port = dev_get_drvdata(dev);
235 struct zfcp_unit *unit;
236 fcp_lun_t fcp_lun;
237 int retval = 0;
238
239 down(&zfcp_data.config_sema);
240 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
241 retval = -EBUSY;
242 goto out;
243 }
244
245 if (strict_strtoull(buf, 0, &fcp_lun)) {
246 retval = -EINVAL;
247 goto out;
248 }
249
250 write_lock_irq(&zfcp_data.config_lock);
251 unit = zfcp_get_unit_by_lun(port, fcp_lun);
252 if (unit && (atomic_read(&unit->refcount) == 0)) {
253 zfcp_unit_get(unit);
254 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
255 list_move(&unit->list, &port->unit_remove_lh);
256 } else
257 unit = NULL;
258
259 write_unlock_irq(&zfcp_data.config_lock);
260
261 if (!unit) {
262 retval = -ENXIO;
263 goto out;
264 }
265
266 zfcp_erp_unit_shutdown(unit, 0, 95, NULL);
267 zfcp_erp_wait(unit->port->adapter);
268 zfcp_unit_put(unit);
269 zfcp_unit_dequeue(unit);
270out:
271 up(&zfcp_data.config_sema);
272 return retval ? retval : (ssize_t) count;
273}
274static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
275
276static struct attribute *zfcp_port_ns_attrs[] = {
277 &dev_attr_port_failed.attr,
278 &dev_attr_port_in_recovery.attr,
279 &dev_attr_port_status.attr,
280 &dev_attr_port_access_denied.attr,
281 NULL
282};
283
284/**
285 * zfcp_sysfs_ns_port_attrs - sysfs attributes for nameserver
286 */
287struct attribute_group zfcp_sysfs_ns_port_attrs = {
288 .attrs = zfcp_port_ns_attrs,
289};
290
291static struct attribute *zfcp_port_no_ns_attrs[] = {
292 &dev_attr_unit_add.attr,
293 &dev_attr_unit_remove.attr,
294 &dev_attr_port_failed.attr,
295 &dev_attr_port_in_recovery.attr,
296 &dev_attr_port_status.attr,
297 &dev_attr_port_access_denied.attr,
298 NULL
299};
300
301/**
302 * zfcp_sysfs_port_attrs - sysfs attributes for all other ports
303 */
304struct attribute_group zfcp_sysfs_port_attrs = {
305 .attrs = zfcp_port_no_ns_attrs,
306};
307
308static struct attribute *zfcp_unit_attrs[] = {
309 &dev_attr_unit_failed.attr,
310 &dev_attr_unit_in_recovery.attr,
311 &dev_attr_unit_status.attr,
312 &dev_attr_unit_access_denied.attr,
313 &dev_attr_unit_access_shared.attr,
314 &dev_attr_unit_access_readonly.attr,
315 NULL
316};
317
318struct attribute_group zfcp_sysfs_unit_attrs = {
319 .attrs = zfcp_unit_attrs,
320};
321
322#define ZFCP_DEFINE_LATENCY_ATTR(_name) \
323static ssize_t \
324zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
325 struct device_attribute *attr, \
326 char *buf) { \
327 struct scsi_device *sdev = to_scsi_device(dev); \
328 struct zfcp_unit *unit = sdev->hostdata; \
329 struct zfcp_latencies *lat = &unit->latencies; \
330 struct zfcp_adapter *adapter = unit->port->adapter; \
331 unsigned long flags; \
332 unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \
333 \
334 spin_lock_irqsave(&lat->lock, flags); \
335 fsum = lat->_name.fabric.sum * adapter->timer_ticks; \
336 fmin = lat->_name.fabric.min * adapter->timer_ticks; \
337 fmax = lat->_name.fabric.max * adapter->timer_ticks; \
338 csum = lat->_name.channel.sum * adapter->timer_ticks; \
339 cmin = lat->_name.channel.min * adapter->timer_ticks; \
340 cmax = lat->_name.channel.max * adapter->timer_ticks; \
341 cc = lat->_name.counter; \
342 spin_unlock_irqrestore(&lat->lock, flags); \
343 \
344 do_div(fsum, 1000); \
345 do_div(fmin, 1000); \
346 do_div(fmax, 1000); \
347 do_div(csum, 1000); \
348 do_div(cmin, 1000); \
349 do_div(cmax, 1000); \
350 \
351 return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n", \
352 fmin, fmax, fsum, cmin, cmax, csum, cc); \
353} \
354static ssize_t \
355zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \
356 struct device_attribute *attr, \
357 const char *buf, size_t count) \
358{ \
359 struct scsi_device *sdev = to_scsi_device(dev); \
360 struct zfcp_unit *unit = sdev->hostdata; \
361 struct zfcp_latencies *lat = &unit->latencies; \
362 unsigned long flags; \
363 \
364 spin_lock_irqsave(&lat->lock, flags); \
365 lat->_name.fabric.sum = 0; \
366 lat->_name.fabric.min = 0xFFFFFFFF; \
367 lat->_name.fabric.max = 0; \
368 lat->_name.channel.sum = 0; \
369 lat->_name.channel.min = 0xFFFFFFFF; \
370 lat->_name.channel.max = 0; \
371 lat->_name.counter = 0; \
372 spin_unlock_irqrestore(&lat->lock, flags); \
373 \
374 return (ssize_t) count; \
375} \
376static DEVICE_ATTR(_name##_latency, S_IWUSR | S_IRUGO, \
377 zfcp_sysfs_unit_##_name##_latency_show, \
378 zfcp_sysfs_unit_##_name##_latency_store);
379
380ZFCP_DEFINE_LATENCY_ATTR(read);
381ZFCP_DEFINE_LATENCY_ATTR(write);
382ZFCP_DEFINE_LATENCY_ATTR(cmd);
383
384#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value) \
385static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
386 struct device_attribute *attr,\
387 char *buf) \
388{ \
389 struct scsi_device *sdev = to_scsi_device(dev); \
390 struct zfcp_unit *unit = sdev->hostdata; \
391 \
392 return sprintf(buf, _format, _value); \
393} \
394static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
395
396ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
397 unit->port->adapter->ccw_device->dev.bus_id);
398ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn);
399ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun);
400
401struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
402 &dev_attr_fcp_lun,
403 &dev_attr_wwpn,
404 &dev_attr_hba_id,
405 &dev_attr_read_latency,
406 &dev_attr_write_latency,
407 &dev_attr_cmd_latency,
408 NULL
409};
410
411static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
412 struct device_attribute *attr,
413 char *buf)
414{
415 struct Scsi_Host *scsi_host = dev_to_shost(dev);
416 struct fsf_qtcb_bottom_port *qtcb_port;
417 struct zfcp_adapter *adapter;
418 int retval;
419
420 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
421 if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
422 return -EOPNOTSUPP;
423
424 qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
425 if (!qtcb_port)
426 return -ENOMEM;
427
428 retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
429 if (!retval)
430 retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
431 qtcb_port->cb_util, qtcb_port->a_util);
432 kfree(qtcb_port);
433 return retval;
434}
435static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
436
437static int zfcp_sysfs_adapter_ex_config(struct device *dev,
438 struct fsf_statistics_info *stat_inf)
439{
440 struct Scsi_Host *scsi_host = dev_to_shost(dev);
441 struct fsf_qtcb_bottom_config *qtcb_config;
442 struct zfcp_adapter *adapter;
443 int retval;
444
445 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
446 if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
447 return -EOPNOTSUPP;
448
449 qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
450 GFP_KERNEL);
451 if (!qtcb_config)
452 return -ENOMEM;
453
454 retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
455 if (!retval)
456 *stat_inf = qtcb_config->stat_info;
457
458 kfree(qtcb_config);
459 return retval;
460}
461
462#define ZFCP_SHOST_ATTR(_name, _format, _arg...) \
463static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
464 struct device_attribute *attr,\
465 char *buf) \
466{ \
467 struct fsf_statistics_info stat_info; \
468 int retval; \
469 \
470 retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info); \
471 if (retval) \
472 return retval; \
473 \
474 return sprintf(buf, _format, ## _arg); \
475} \
476static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
477
478ZFCP_SHOST_ATTR(requests, "%llu %llu %llu\n",
479 (unsigned long long) stat_info.input_req,
480 (unsigned long long) stat_info.output_req,
481 (unsigned long long) stat_info.control_req);
482
483ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
484 (unsigned long long) stat_info.input_mb,
485 (unsigned long long) stat_info.output_mb);
486
487ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
488 (unsigned long long) stat_info.seconds_act);
489
490struct device_attribute *zfcp_sysfs_shost_attrs[] = {
491 &dev_attr_utilization,
492 &dev_attr_requests,
493 &dev_attr_megabytes,
494 &dev_attr_seconds_active,
495 NULL
496};
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c
deleted file mode 100644
index ccbba4dd3a77..000000000000
--- a/drivers/s390/scsi/zfcp_sysfs_adapter.c
+++ /dev/null
@@ -1,270 +0,0 @@
1/*
2 * This file is part of the zfcp device driver for
3 * FCP adapters for IBM System z9 and zSeries.
4 *
5 * (C) Copyright IBM Corp. 2002, 2006
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include "zfcp_ext.h"
23
24#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
25
/**
 * ZFCP_DEFINE_ADAPTER_ATTR - define a read-only adapter sysfs attribute
 * @_name: name of show attribute
 * @_format: format string
 * @_value: value to print; may reference the local variable "adapter"
 *
 * Generates a sysfs show function plus its DEVICE_ATTR for an adapter.
 * The adapter is recovered from the device's driver data.
 */
#define ZFCP_DEFINE_ADAPTER_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, struct device_attribute *attr, \
						 char *buf) \
{ \
	struct zfcp_adapter *adapter; \
 \
	adapter = dev_get_drvdata(dev); \
	return sprintf(buf, _format, _value); \
} \
 \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);

/* raw adapter status word */
ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
/* peer node/port names and destination id (point-to-point topology) */
ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn);
ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn);
ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
/* hardware/firmware version information reported by the adapter */
ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
			 adapter->hardware_version);
/* "1" while error recovery (ERP) is using this adapter, else "0" */
ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask
			 (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status));
56
/**
 * zfcp_sysfs_port_add_store - add a port to sysfs tree
 * @dev: pointer to belonging device
 * @attr: device attribute (unused)
 * @buf: pointer to input buffer containing the WWPN as a number string
 * @count: number of bytes in buffer
 *
 * Store function of the "port_add" attribute of an adapter.
 * Parses a WWPN from @buf, enqueues a new port for the adapter and
 * triggers (and waits for) its initial reopen via error recovery.
 * Returns @count on success, -EBUSY while the adapter is being removed,
 * or -EINVAL on a malformed WWPN or failed enqueue.
 */
static ssize_t
zfcp_sysfs_port_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	wwn_t wwpn;
	char *endp;
	struct zfcp_adapter *adapter;
	struct zfcp_port *port;
	int retval = -EINVAL;

	/* config_sema serializes all configuration changes */
	down(&zfcp_data.config_sema);

	adapter = dev_get_drvdata(dev);
	if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
		retval = -EBUSY;
		goto out;
	}

	wwpn = simple_strtoull(buf, &endp, 0);
	/* allow at most one trailing character (typically '\n') after
	 * the number; anything more is rejected as -EINVAL */
	if ((endp + 1) < (buf + count))
		goto out;

	port = zfcp_port_enqueue(adapter, wwpn, 0, 0);
	if (!port)
		goto out;

	retval = 0;

	/* kick off recovery for the new port and wait for it to finish
	 * before dropping our reference */
	zfcp_erp_port_reopen(port, 0, 91, NULL);
	zfcp_erp_wait(port->adapter);
	zfcp_port_put(port);
 out:
	up(&zfcp_data.config_sema);
	return retval ? retval : (ssize_t) count;
}

static DEVICE_ATTR(port_add, S_IWUSR, NULL, zfcp_sysfs_port_add_store);
101
/**
 * zfcp_sysfs_port_remove_store - remove a port from sysfs tree
 * @dev: pointer to belonging device
 * @attr: device attribute (unused)
 * @buf: pointer to input buffer containing the WWPN as a number string
 * @count: number of bytes in buffer
 *
 * Store function of the "port_remove" attribute of an adapter.
 * Looks up the port by WWPN, marks it for removal and moves it to the
 * adapter's remove list (under config_lock), then shuts it down via
 * error recovery and dequeues it.  A port with outstanding references
 * cannot be removed (-ENXIO).
 */
static ssize_t
zfcp_sysfs_port_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct zfcp_adapter *adapter;
	struct zfcp_port *port;
	wwn_t wwpn;
	char *endp;
	int retval = 0;

	/* config_sema serializes all configuration changes */
	down(&zfcp_data.config_sema);

	adapter = dev_get_drvdata(dev);
	if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
		retval = -EBUSY;
		goto out;
	}

	wwpn = simple_strtoull(buf, &endp, 0);
	/* allow at most one trailing character (typically '\n') */
	if ((endp + 1) < (buf + count)) {
		retval = -EINVAL;
		goto out;
	}

	/* atomically claim the port: only removable while unreferenced */
	write_lock_irq(&zfcp_data.config_lock);
	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (port && (atomic_read(&port->refcount) == 0)) {
		zfcp_port_get(port);
		atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
		list_move(&port->list, &adapter->port_remove_lh);
	}
	else {
		port = NULL;
	}
	write_unlock_irq(&zfcp_data.config_lock);

	if (!port) {
		retval = -ENXIO;
		goto out;
	}

	/* shut the port down, wait for recovery to complete, then drop
	 * our reference and finally dequeue it */
	zfcp_erp_port_shutdown(port, 0, 92, NULL);
	zfcp_erp_wait(adapter);
	zfcp_port_put(port);
	zfcp_port_dequeue(port);
 out:
	up(&zfcp_data.config_sema);
	return retval ? retval : (ssize_t) count;
}

static DEVICE_ATTR(port_remove, S_IWUSR, NULL, zfcp_sysfs_port_remove_store);
160
/**
 * zfcp_sysfs_adapter_failed_store - failed state of adapter
 * @dev: pointer to belonging device
 * @attr: device attribute (unused)
 * @buf: pointer to input buffer
 * @count: number of bytes in buffer
 *
 * Store function of the "failed" attribute of an adapter.
 * If a "0" gets written to "failed", error recovery will be
 * started for the belonging adapter.  Any value other than 0 is
 * rejected with -EINVAL.
 */
static ssize_t
zfcp_sysfs_adapter_failed_store(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct zfcp_adapter *adapter;
	unsigned int val;
	char *endp;
	int retval = 0;

	/* config_sema serializes all configuration changes */
	down(&zfcp_data.config_sema);

	adapter = dev_get_drvdata(dev);
	if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) {
		retval = -EBUSY;
		goto out;
	}

	/* only "0" (plus optional trailing character) is accepted */
	val = simple_strtoul(buf, &endp, 0);
	if (((endp + 1) < (buf + count)) || (val != 0)) {
		retval = -EINVAL;
		goto out;
	}

	/* mark the adapter runnable again and restart recovery,
	 * clearing the ERP_FAILED state; wait for recovery to finish */
	zfcp_erp_modify_adapter_status(adapter, 44, NULL,
				       ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 93,
				NULL);
	zfcp_erp_wait(adapter);
 out:
	up(&zfcp_data.config_sema);
	return retval ? retval : (ssize_t) count;
}
203
204/**
205 * zfcp_sysfs_adapter_failed_show - failed state of adapter
206 * @dev: pointer to belonging device
207 * @buf: pointer to input buffer
208 *
209 * Show function of "failed" attribute of adapter. Will be
210 * "0" if adapter is working, otherwise "1".
211 */
212static ssize_t
213zfcp_sysfs_adapter_failed_show(struct device *dev, struct device_attribute *attr, char *buf)
214{
215 struct zfcp_adapter *adapter;
216
217 adapter = dev_get_drvdata(dev);
218 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status))
219 return sprintf(buf, "1\n");
220 else
221 return sprintf(buf, "0\n");
222}
223
224static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_adapter_failed_show,
225 zfcp_sysfs_adapter_failed_store);
226
/* all sysfs attributes of an adapter, NULL-terminated */
static struct attribute *zfcp_adapter_attrs[] = {
	&dev_attr_failed.attr,
	&dev_attr_in_recovery.attr,
	&dev_attr_port_remove.attr,
	&dev_attr_port_add.attr,
	&dev_attr_peer_wwnn.attr,
	&dev_attr_peer_wwpn.attr,
	&dev_attr_peer_d_id.attr,
	&dev_attr_card_version.attr,
	&dev_attr_lic_version.attr,
	&dev_attr_status.attr,
	&dev_attr_hardware_version.attr,
	NULL
};

/* attribute group registered on the adapter's device kobject */
static struct attribute_group zfcp_adapter_attr_group = {
	.attrs = zfcp_adapter_attrs,
};
245
246/**
247 * zfcp_sysfs_create_adapter_files - create sysfs adapter files
248 * @dev: pointer to belonging device
249 *
250 * Create all attributes of the sysfs representation of an adapter.
251 */
252int
253zfcp_sysfs_adapter_create_files(struct device *dev)
254{
255 return sysfs_create_group(&dev->kobj, &zfcp_adapter_attr_group);
256}
257
258/**
259 * zfcp_sysfs_remove_adapter_files - remove sysfs adapter files
260 * @dev: pointer to belonging device
261 *
262 * Remove all attributes of the sysfs representation of an adapter.
263 */
264void
265zfcp_sysfs_adapter_remove_files(struct device *dev)
266{
267 sysfs_remove_group(&dev->kobj, &zfcp_adapter_attr_group);
268}
269
270#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs_driver.c b/drivers/s390/scsi/zfcp_sysfs_driver.c
deleted file mode 100644
index 651edd58906a..000000000000
--- a/drivers/s390/scsi/zfcp_sysfs_driver.c
+++ /dev/null
@@ -1,106 +0,0 @@
1/*
2 * This file is part of the zfcp device driver for
3 * FCP adapters for IBM System z9 and zSeries.
4 *
5 * (C) Copyright IBM Corp. 2002, 2006
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include "zfcp_ext.h"
23
24#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
25
/**
 * ZFCP_DEFINE_DRIVER_ATTR - define loglevel sysfs attributes for one area
 * @_name: name of attribute
 * @_define: name of ZFCP loglevel define
 *
 * Generates the store and show functions plus the DRIVER_ATTR for one
 * loglevel sysfs attribute of the zfcp driver.  The combined loglevel
 * word packs one 4-bit level per log area; the store function replaces
 * only this area's nibble.  Valid levels are 0..3.
 */
#define ZFCP_DEFINE_DRIVER_ATTR(_name, _define) \
static ssize_t zfcp_sysfs_loglevel_##_name##_store(struct device_driver *drv, \
						   const char *buf, \
						   size_t count) \
{ \
	unsigned int loglevel; \
	unsigned int new_loglevel; \
	char *endp; \
 \
	new_loglevel = simple_strtoul(buf, &endp, 0); \
	if ((endp + 1) < (buf + count)) \
		return -EINVAL; \
	if (new_loglevel > 3) \
		return -EINVAL; \
	down(&zfcp_data.config_sema); \
	loglevel = atomic_read(&zfcp_data.loglevel); \
	loglevel &= ~((unsigned int) 0xf << (ZFCP_LOG_AREA_##_define << 2)); \
	loglevel |= new_loglevel << (ZFCP_LOG_AREA_##_define << 2); \
	atomic_set(&zfcp_data.loglevel, loglevel); \
	up(&zfcp_data.config_sema); \
	return count; \
} \
 \
static ssize_t zfcp_sysfs_loglevel_##_name##_show(struct device_driver *dev, \
						  char *buf) \
{ \
	return sprintf(buf,"%d\n", (unsigned int) \
		       ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA_##_define)); \
} \
 \
static DRIVER_ATTR(loglevel_##_name, S_IWUSR | S_IRUGO, \
		   zfcp_sysfs_loglevel_##_name##_show, \
		   zfcp_sysfs_loglevel_##_name##_store);

/* one loglevel attribute per log area */
ZFCP_DEFINE_DRIVER_ATTR(other, OTHER);
ZFCP_DEFINE_DRIVER_ATTR(scsi, SCSI);
ZFCP_DEFINE_DRIVER_ATTR(fsf, FSF);
ZFCP_DEFINE_DRIVER_ATTR(config, CONFIG);
ZFCP_DEFINE_DRIVER_ATTR(cio, CIO);
ZFCP_DEFINE_DRIVER_ATTR(qdio, QDIO);
ZFCP_DEFINE_DRIVER_ATTR(erp, ERP);
ZFCP_DEFINE_DRIVER_ATTR(fc, FC);
75
76static ssize_t zfcp_sysfs_version_show(struct device_driver *dev,
77 char *buf)
78{
79 return sprintf(buf, "%s\n", zfcp_data.driver_version);
80}
81
82static DRIVER_ATTR(version, S_IRUGO, zfcp_sysfs_version_show, NULL);
83
/* all driver-level sysfs attributes (loglevels + version), NULL-terminated */
static struct attribute *zfcp_driver_attrs[] = {
	&driver_attr_loglevel_other.attr,
	&driver_attr_loglevel_scsi.attr,
	&driver_attr_loglevel_fsf.attr,
	&driver_attr_loglevel_config.attr,
	&driver_attr_loglevel_cio.attr,
	&driver_attr_loglevel_qdio.attr,
	&driver_attr_loglevel_erp.attr,
	&driver_attr_loglevel_fc.attr,
	&driver_attr_version.attr,
	NULL
};

static struct attribute_group zfcp_driver_attr_group = {
	.attrs = zfcp_driver_attrs,
};

/* exported list of driver attribute groups, NULL-terminated */
struct attribute_group *zfcp_driver_attr_groups[] = {
	&zfcp_driver_attr_group,
	NULL,
};
105
106#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs_port.c b/drivers/s390/scsi/zfcp_sysfs_port.c
deleted file mode 100644
index 703c1b5cb602..000000000000
--- a/drivers/s390/scsi/zfcp_sysfs_port.c
+++ /dev/null
@@ -1,295 +0,0 @@
1/*
2 * This file is part of the zfcp device driver for
3 * FCP adapters for IBM System z9 and zSeries.
4 *
5 * (C) Copyright IBM Corp. 2002, 2006
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include "zfcp_ext.h"
23
24#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
25
/**
 * zfcp_sysfs_port_release - gets called when a struct device port is released
 * @dev: pointer to belonging device
 *
 * NOTE(review): frees the struct device itself, so the port code is
 * expected to have kmalloc'd it — confirm against the port setup code.
 */
void
zfcp_sysfs_port_release(struct device *dev)
{
	kfree(dev);
}
35
/**
 * ZFCP_DEFINE_PORT_ATTR - define a read-only port sysfs attribute
 * @_name: name of show attribute
 * @_format: format string
 * @_value: value to print; may reference the local variable "port"
 *
 * Generates a sysfs show function plus its DEVICE_ATTR for a port.
 * The port is recovered from the device's driver data.
 */
#define ZFCP_DEFINE_PORT_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_port_##_name##_show(struct device *dev, struct device_attribute *attr, \
					      char *buf) \
{ \
	struct zfcp_port *port; \
 \
	port = dev_get_drvdata(dev); \
	return sprintf(buf, _format, _value); \
} \
 \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_port_##_name##_show, NULL);

/* raw port status word */
ZFCP_DEFINE_PORT_ATTR(status, "0x%08x\n", atomic_read(&port->status));
/* "1" while error recovery (ERP) is using this port, else "0" */
ZFCP_DEFINE_PORT_ATTR(in_recovery, "%d\n", atomic_test_mask
		      (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status));
/* "1" when access to the port is denied, else "0" */
ZFCP_DEFINE_PORT_ATTR(access_denied, "%d\n", atomic_test_mask
		      (ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status));
61
/**
 * zfcp_sysfs_unit_add_store - add a unit to sysfs tree
 * @dev: pointer to belonging device
 * @attr: device attribute (unused)
 * @buf: pointer to input buffer containing the FCP LUN as a number string
 * @count: number of bytes in buffer
 *
 * Store function of the "unit_add" attribute of a port.
 * Parses an FCP LUN from @buf, enqueues a new unit for the port and
 * triggers (and waits for) its initial reopen via error recovery.
 * Returns @count on success, -EBUSY while the port is being removed,
 * or -EINVAL on a malformed LUN or failed enqueue.
 */
static ssize_t
zfcp_sysfs_unit_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	fcp_lun_t fcp_lun;
	char *endp;
	struct zfcp_port *port;
	struct zfcp_unit *unit;
	int retval = -EINVAL;

	/* config_sema serializes all configuration changes */
	down(&zfcp_data.config_sema);

	port = dev_get_drvdata(dev);
	if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
		retval = -EBUSY;
		goto out;
	}

	fcp_lun = simple_strtoull(buf, &endp, 0);
	/* allow at most one trailing character (typically '\n') */
	if ((endp + 1) < (buf + count))
		goto out;

	unit = zfcp_unit_enqueue(port, fcp_lun);
	if (!unit)
		goto out;

	retval = 0;

	/* kick off recovery for the new unit and wait for it to finish
	 * before dropping our reference */
	zfcp_erp_unit_reopen(unit, 0, 94, NULL);
	zfcp_erp_wait(unit->port->adapter);
	zfcp_unit_put(unit);
 out:
	up(&zfcp_data.config_sema);
	return retval ? retval : (ssize_t) count;
}

static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
106
/**
 * zfcp_sysfs_unit_remove_store - remove a unit from sysfs tree
 * @dev: pointer to belonging device
 * @attr: device attribute (unused)
 * @buf: pointer to input buffer containing the FCP LUN as a number string
 * @count: number of bytes in buffer
 *
 * Store function of the "unit_remove" attribute of a port.  Looks up
 * the unit by LUN, marks it for removal and moves it to the port's
 * remove list (under config_lock), then shuts it down via error
 * recovery and dequeues it.  A unit with outstanding references cannot
 * be removed (-ENXIO).
 */
static ssize_t
zfcp_sysfs_unit_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct zfcp_port *port;
	struct zfcp_unit *unit;
	fcp_lun_t fcp_lun;
	char *endp;
	int retval = 0;

	/* config_sema serializes all configuration changes */
	down(&zfcp_data.config_sema);

	port = dev_get_drvdata(dev);
	if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
		retval = -EBUSY;
		goto out;
	}

	fcp_lun = simple_strtoull(buf, &endp, 0);
	/* allow at most one trailing character (typically '\n') */
	if ((endp + 1) < (buf + count)) {
		retval = -EINVAL;
		goto out;
	}

	/* atomically claim the unit: only removable while unreferenced */
	write_lock_irq(&zfcp_data.config_lock);
	unit = zfcp_get_unit_by_lun(port, fcp_lun);
	if (unit && (atomic_read(&unit->refcount) == 0)) {
		zfcp_unit_get(unit);
		atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
		list_move(&unit->list, &port->unit_remove_lh);
	}
	else {
		unit = NULL;
	}
	write_unlock_irq(&zfcp_data.config_lock);

	if (!unit) {
		retval = -ENXIO;
		goto out;
	}

	/* shut the unit down, wait for recovery, then drop our
	 * reference and finally dequeue it */
	zfcp_erp_unit_shutdown(unit, 0, 95, NULL);
	zfcp_erp_wait(unit->port->adapter);
	zfcp_unit_put(unit);
	zfcp_unit_dequeue(unit);
 out:
	up(&zfcp_data.config_sema);
	return retval ? retval : (ssize_t) count;
}

static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
163
/**
 * zfcp_sysfs_port_failed_store - failed state of port
 * @dev: pointer to belonging device
 * @attr: device attribute (unused)
 * @buf: pointer to input buffer
 * @count: number of bytes in buffer
 *
 * Store function of the "failed" attribute of a port.
 * If a "0" gets written to "failed", error recovery will be
 * started for the belonging port.  Any value other than 0 is rejected
 * with -EINVAL.
 */
static ssize_t
zfcp_sysfs_port_failed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct zfcp_port *port;
	unsigned int val;
	char *endp;
	int retval = 0;

	/* config_sema serializes all configuration changes */
	down(&zfcp_data.config_sema);

	port = dev_get_drvdata(dev);
	if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
		retval = -EBUSY;
		goto out;
	}

	/* only "0" (plus optional trailing character) is accepted */
	val = simple_strtoul(buf, &endp, 0);
	if (((endp + 1) < (buf + count)) || (val != 0)) {
		retval = -EINVAL;
		goto out;
	}

	/* mark the port runnable again and restart recovery, clearing
	 * the ERP_FAILED state; wait for recovery to finish */
	zfcp_erp_modify_port_status(port, 45, NULL,
				    ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 96, NULL);
	zfcp_erp_wait(port->adapter);
 out:
	up(&zfcp_data.config_sema);
	return retval ? retval : (ssize_t) count;
}
204
205/**
206 * zfcp_sysfs_port_failed_show - failed state of port
207 * @dev: pointer to belonging device
208 * @buf: pointer to input buffer
209 *
210 * Show function of "failed" attribute of port. Will be
211 * "0" if port is working, otherwise "1".
212 */
213static ssize_t
214zfcp_sysfs_port_failed_show(struct device *dev, struct device_attribute *attr, char *buf)
215{
216 struct zfcp_port *port;
217
218 port = dev_get_drvdata(dev);
219 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status))
220 return sprintf(buf, "1\n");
221 else
222 return sprintf(buf, "0\n");
223}
224
225static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_port_failed_show,
226 zfcp_sysfs_port_failed_store);
227
/**
 * zfcp_port_common_attrs
 * sysfs attributes that are common for all kind of fc ports.
 */
static struct attribute *zfcp_port_common_attrs[] = {
	&dev_attr_failed.attr,
	&dev_attr_in_recovery.attr,
	&dev_attr_status.attr,
	&dev_attr_access_denied.attr,
	NULL
};

static struct attribute_group zfcp_port_common_attr_group = {
	.attrs = zfcp_port_common_attrs,
};

/**
 * zfcp_port_no_ns_attrs
 * sysfs attributes not to be used for nameserver (well-known address)
 * ports: units can only be attached to regular target ports.
 */
static struct attribute *zfcp_port_no_ns_attrs[] = {
	&dev_attr_unit_add.attr,
	&dev_attr_unit_remove.attr,
	NULL
};

static struct attribute_group zfcp_port_no_ns_attr_group = {
	.attrs = zfcp_port_no_ns_attrs,
};
257
258/**
259 * zfcp_sysfs_port_create_files - create sysfs port files
260 * @dev: pointer to belonging device
261 *
262 * Create all attributes of the sysfs representation of a port.
263 */
264int
265zfcp_sysfs_port_create_files(struct device *dev, u32 flags)
266{
267 int retval;
268
269 retval = sysfs_create_group(&dev->kobj, &zfcp_port_common_attr_group);
270
271 if ((flags & ZFCP_STATUS_PORT_WKA) || retval)
272 return retval;
273
274 retval = sysfs_create_group(&dev->kobj, &zfcp_port_no_ns_attr_group);
275 if (retval)
276 sysfs_remove_group(&dev->kobj, &zfcp_port_common_attr_group);
277
278 return retval;
279}
280
281/**
282 * zfcp_sysfs_port_remove_files - remove sysfs port files
283 * @dev: pointer to belonging device
284 *
285 * Remove all attributes of the sysfs representation of a port.
286 */
287void
288zfcp_sysfs_port_remove_files(struct device *dev, u32 flags)
289{
290 sysfs_remove_group(&dev->kobj, &zfcp_port_common_attr_group);
291 if (!(flags & ZFCP_STATUS_PORT_WKA))
292 sysfs_remove_group(&dev->kobj, &zfcp_port_no_ns_attr_group);
293}
294
295#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_sysfs_unit.c b/drivers/s390/scsi/zfcp_sysfs_unit.c
deleted file mode 100644
index 80fb2c2cf48a..000000000000
--- a/drivers/s390/scsi/zfcp_sysfs_unit.c
+++ /dev/null
@@ -1,167 +0,0 @@
1/*
2 * This file is part of the zfcp device driver for
3 * FCP adapters for IBM System z9 and zSeries.
4 *
5 * (C) Copyright IBM Corp. 2002, 2006
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include "zfcp_ext.h"
23
24#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
25
/**
 * zfcp_sysfs_unit_release - gets called when a struct device unit is released
 * @dev: pointer to belonging device
 *
 * NOTE(review): frees the struct device itself, so the unit code is
 * expected to have kmalloc'd it — confirm against the unit setup code.
 */
void
zfcp_sysfs_unit_release(struct device *dev)
{
	kfree(dev);
}
35
/**
 * ZFCP_DEFINE_UNIT_ATTR - define a read-only unit sysfs attribute
 * @_name: name of show attribute
 * @_format: format string
 * @_value: value to print; may reference the local variable "unit"
 *
 * Generates a sysfs show function plus its DEVICE_ATTR for a unit.
 * The unit is recovered from the device's driver data.
 */
#define ZFCP_DEFINE_UNIT_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_unit_##_name##_show(struct device *dev, struct device_attribute *attr, \
					      char *buf) \
{ \
	struct zfcp_unit *unit; \
 \
	unit = dev_get_drvdata(dev); \
	return sprintf(buf, _format, _value); \
} \
 \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_unit_##_name##_show, NULL);

/* raw unit status word */
ZFCP_DEFINE_UNIT_ATTR(status, "0x%08x\n", atomic_read(&unit->status));
/* "1" while error recovery (ERP) is using this unit, else "0" */
ZFCP_DEFINE_UNIT_ATTR(in_recovery, "%d\n", atomic_test_mask
		      (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status));
/* access control states reported for the unit */
ZFCP_DEFINE_UNIT_ATTR(access_denied, "%d\n", atomic_test_mask
		      (ZFCP_STATUS_COMMON_ACCESS_DENIED, &unit->status));
ZFCP_DEFINE_UNIT_ATTR(access_shared, "%d\n", atomic_test_mask
		      (ZFCP_STATUS_UNIT_SHARED, &unit->status));
ZFCP_DEFINE_UNIT_ATTR(access_readonly, "%d\n", atomic_test_mask
		      (ZFCP_STATUS_UNIT_READONLY, &unit->status));
65
/**
 * zfcp_sysfs_unit_failed_store - failed state of unit
 * @dev: pointer to belonging device
 * @attr: device attribute (unused)
 * @buf: pointer to input buffer
 * @count: number of bytes in buffer
 *
 * Store function of the "failed" attribute of a unit.
 * If a "0" gets written to "failed", error recovery will be
 * started for the belonging unit.  Any value other than 0 is rejected
 * with -EINVAL.
 */
static ssize_t
zfcp_sysfs_unit_failed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct zfcp_unit *unit;
	unsigned int val;
	char *endp;
	int retval = 0;

	/* config_sema serializes all configuration changes */
	down(&zfcp_data.config_sema);
	unit = dev_get_drvdata(dev);
	if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status)) {
		retval = -EBUSY;
		goto out;
	}

	/* only "0" (plus optional trailing character) is accepted */
	val = simple_strtoul(buf, &endp, 0);
	if (((endp + 1) < (buf + count)) || (val != 0)) {
		retval = -EINVAL;
		goto out;
	}

	/* mark the unit runnable again and restart recovery, clearing
	 * the ERP_FAILED state; wait for recovery to finish */
	zfcp_erp_modify_unit_status(unit, 46, NULL,
				    ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
	zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, 97, NULL);
	zfcp_erp_wait(unit->port->adapter);
 out:
	up(&zfcp_data.config_sema);
	return retval ? retval : (ssize_t) count;
}
105
106/**
107 * zfcp_sysfs_unit_failed_show - failed state of unit
108 * @dev: pointer to belonging device
109 * @buf: pointer to input buffer
110 *
111 * Show function of "failed" attribute of unit. Will be
112 * "0" if unit is working, otherwise "1".
113 */
114static ssize_t
115zfcp_sysfs_unit_failed_show(struct device *dev, struct device_attribute *attr, char *buf)
116{
117 struct zfcp_unit *unit;
118
119 unit = dev_get_drvdata(dev);
120 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status))
121 return sprintf(buf, "1\n");
122 else
123 return sprintf(buf, "0\n");
124}
125
126static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_unit_failed_show,
127 zfcp_sysfs_unit_failed_store);
128
/* all sysfs attributes of a unit, NULL-terminated */
static struct attribute *zfcp_unit_attrs[] = {
	&dev_attr_failed.attr,
	&dev_attr_in_recovery.attr,
	&dev_attr_status.attr,
	&dev_attr_access_denied.attr,
	&dev_attr_access_shared.attr,
	&dev_attr_access_readonly.attr,
	NULL
};

/* attribute group registered on the unit's device kobject */
static struct attribute_group zfcp_unit_attr_group = {
	.attrs = zfcp_unit_attrs,
};
142
143/**
144 * zfcp_sysfs_create_unit_files - create sysfs unit files
145 * @dev: pointer to belonging device
146 *
147 * Create all attributes of the sysfs representation of a unit.
148 */
149int
150zfcp_sysfs_unit_create_files(struct device *dev)
151{
152 return sysfs_create_group(&dev->kobj, &zfcp_unit_attr_group);
153}
154
155/**
156 * zfcp_sysfs_remove_unit_files - remove sysfs unit files
157 * @dev: pointer to belonging device
158 *
159 * Remove all attributes of the sysfs representation of a unit.
160 */
161void
162zfcp_sysfs_unit_remove_files(struct device *dev)
163{
164 sysfs_remove_group(&dev->kobj, &zfcp_unit_attr_group);
165}
166
167#undef ZFCP_LOG_AREA
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 81ccbd7f9e34..26be540d1dd3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -888,6 +888,25 @@ config SCSI_IBMVSCSIS
888 To compile this driver as a module, choose M here: the 888 To compile this driver as a module, choose M here: the
889 module will be called ibmvstgt. 889 module will be called ibmvstgt.
890 890
891config SCSI_IBMVFC
892 tristate "IBM Virtual FC support"
893 depends on PPC_PSERIES && SCSI
894 select SCSI_FC_ATTRS
895 help
896 This is the IBM POWER Virtual FC Client
897
898 To compile this driver as a module, choose M here: the
899 module will be called ibmvfc.
900
901config SCSI_IBMVFC_TRACE
902 bool "enable driver internal trace"
903 depends on SCSI_IBMVFC
904 default y
905 help
906 If you say Y here, the driver will trace all commands issued
907 to the adapter. Performance impact is minimal. Trace can be
908 dumped using /sys/class/scsi_host/hostXX/trace.
909
891config SCSI_INITIO 910config SCSI_INITIO
892 tristate "Initio 9100U(W) support" 911 tristate "Initio 9100U(W) support"
893 depends on PCI && SCSI 912 depends on PCI && SCSI
@@ -1738,10 +1757,12 @@ config SCSI_SUNESP
1738 select SCSI_SPI_ATTRS 1757 select SCSI_SPI_ATTRS
1739 help 1758 help
1740 This is the driver for the Sun ESP SCSI host adapter. The ESP 1759 This is the driver for the Sun ESP SCSI host adapter. The ESP
1741 chipset is present in most SPARC SBUS-based computers. 1760 chipset is present in most SPARC SBUS-based computers and
1761 supports the Emulex family of ESP SCSI chips (esp100, esp100A,
1762 esp236, fas101, fas236) as well as the Qlogic fas366 SCSI chip.
1742 1763
1743 To compile this driver as a module, choose M here: the 1764 To compile this driver as a module, choose M here: the
1744 module will be called esp. 1765 module will be called sun_esp.
1745 1766
1746config ZFCP 1767config ZFCP
1747 tristate "FCP host bus adapter driver for IBM eServer zSeries" 1768 tristate "FCP host bus adapter driver for IBM eServer zSeries"
@@ -1771,4 +1792,6 @@ endif # SCSI_LOWLEVEL
1771 1792
1772source "drivers/scsi/pcmcia/Kconfig" 1793source "drivers/scsi/pcmcia/Kconfig"
1773 1794
1795source "drivers/scsi/device_handler/Kconfig"
1796
1774endmenu 1797endmenu
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 6c775e350c98..a8149677de23 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
34obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o 34obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o
35obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/ 35obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
36obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o 36obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o
37obj-$(CONFIG_SCSI_DH) += device_handler/
37 38
38obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o 39obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
39obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o 40obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
@@ -118,6 +119,7 @@ obj-$(CONFIG_SCSI_IPR) += ipr.o
118obj-$(CONFIG_SCSI_SRP) += libsrp.o 119obj-$(CONFIG_SCSI_SRP) += libsrp.o
119obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/ 120obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
120obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/ 121obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
122obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
121obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o 123obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
122obj-$(CONFIG_SCSI_STEX) += stex.o 124obj-$(CONFIG_SCSI_STEX) += stex.o
123obj-$(CONFIG_SCSI_MVSAS) += mvsas.o 125obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index ced3eebe252c..84bb61628372 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -389,7 +389,7 @@ static u8 orc_load_firmware(struct orc_host * host)
389 389
390 outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Enable SRAM programming */ 390 outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Enable SRAM programming */
391 data32_ptr = (u8 *) & data32; 391 data32_ptr = (u8 *) & data32;
392 data32 = 0; /* Initial FW address to 0 */ 392 data32 = cpu_to_le32(0); /* Initial FW address to 0 */
393 outw(0x0010, host->base + ORC_EBIOSADR0); 393 outw(0x0010, host->base + ORC_EBIOSADR0);
394 *data32_ptr = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ 394 *data32_ptr = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
395 outw(0x0011, host->base + ORC_EBIOSADR0); 395 outw(0x0011, host->base + ORC_EBIOSADR0);
@@ -397,18 +397,19 @@ static u8 orc_load_firmware(struct orc_host * host)
397 outw(0x0012, host->base + ORC_EBIOSADR0); 397 outw(0x0012, host->base + ORC_EBIOSADR0);
398 *(data32_ptr + 2) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ 398 *(data32_ptr + 2) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
399 outw(*(data32_ptr + 2), host->base + ORC_EBIOSADR2); 399 outw(*(data32_ptr + 2), host->base + ORC_EBIOSADR2);
400 outl(data32, host->base + ORC_FWBASEADR); /* Write FW address */ 400 outl(le32_to_cpu(data32), host->base + ORC_FWBASEADR); /* Write FW address */
401 401
402 /* Copy the code from the BIOS to the SRAM */ 402 /* Copy the code from the BIOS to the SRAM */
403 403
404 bios_addr = (u16) data32; /* FW code locate at BIOS address + ? */ 404 udelay(500); /* Required on Sun Ultra 5 ... 350 -> failures */
405 bios_addr = (u16) le32_to_cpu(data32); /* FW code locate at BIOS address + ? */
405 for (i = 0, data32_ptr = (u8 *) & data32; /* Download the code */ 406 for (i = 0, data32_ptr = (u8 *) & data32; /* Download the code */
406 i < 0x1000; /* Firmware code size = 4K */ 407 i < 0x1000; /* Firmware code size = 4K */
407 i++, bios_addr++) { 408 i++, bios_addr++) {
408 outw(bios_addr, host->base + ORC_EBIOSADR0); 409 outw(bios_addr, host->base + ORC_EBIOSADR0);
409 *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ 410 *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
410 if ((i % 4) == 3) { 411 if ((i % 4) == 3) {
411 outl(data32, host->base + ORC_RISCRAM); /* Write every 4 bytes */ 412 outl(le32_to_cpu(data32), host->base + ORC_RISCRAM); /* Write every 4 bytes */
412 data32_ptr = (u8 *) & data32; 413 data32_ptr = (u8 *) & data32;
413 } 414 }
414 } 415 }
@@ -423,7 +424,7 @@ static u8 orc_load_firmware(struct orc_host * host)
423 outw(bios_addr, host->base + ORC_EBIOSADR0); 424 outw(bios_addr, host->base + ORC_EBIOSADR0);
424 *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ 425 *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
425 if ((i % 4) == 3) { 426 if ((i % 4) == 3) {
426 if (inl(host->base + ORC_RISCRAM) != data32) { 427 if (inl(host->base + ORC_RISCRAM) != le32_to_cpu(data32)) {
427 outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */ 428 outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */
428 outb(data, host->base + ORC_GCFG); /*Disable EEPROM programming */ 429 outb(data, host->base + ORC_GCFG); /*Disable EEPROM programming */
429 return 0; 430 return 0;
@@ -459,8 +460,8 @@ static void setup_SCBs(struct orc_host * host)
459 460
460 for (i = 0; i < ORC_MAXQUEUE; i++) { 461 for (i = 0; i < ORC_MAXQUEUE; i++) {
461 escb_phys = (host->escb_phys + (sizeof(struct orc_extended_scb) * i)); 462 escb_phys = (host->escb_phys + (sizeof(struct orc_extended_scb) * i));
462 scb->sg_addr = (u32) escb_phys; 463 scb->sg_addr = cpu_to_le32((u32) escb_phys);
463 scb->sense_addr = (u32) escb_phys; 464 scb->sense_addr = cpu_to_le32((u32) escb_phys);
464 scb->escb = escb; 465 scb->escb = escb;
465 scb->scbidx = i; 466 scb->scbidx = i;
466 scb++; 467 scb++;
@@ -642,8 +643,8 @@ static int orc_device_reset(struct orc_host * host, struct scsi_cmnd *cmd, unsig
642 scb->link = 0xFF; 643 scb->link = 0xFF;
643 scb->reserved0 = 0; 644 scb->reserved0 = 0;
644 scb->reserved1 = 0; 645 scb->reserved1 = 0;
645 scb->xferlen = 0; 646 scb->xferlen = cpu_to_le32(0);
646 scb->sg_len = 0; 647 scb->sg_len = cpu_to_le32(0);
647 648
648 escb->srb = NULL; 649 escb->srb = NULL;
649 escb->srb = cmd; 650 escb->srb = cmd;
@@ -839,7 +840,7 @@ static irqreturn_t orc_interrupt(struct orc_host * host)
839 * Build a host adapter control block from the SCSI mid layer command 840 * Build a host adapter control block from the SCSI mid layer command
840 */ 841 */
841 842
842static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struct scsi_cmnd * cmd) 843static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struct scsi_cmnd * cmd)
843{ /* Create corresponding SCB */ 844{ /* Create corresponding SCB */
844 struct scatterlist *sg; 845 struct scatterlist *sg;
845 struct orc_sgent *sgent; /* Pointer to SG list */ 846 struct orc_sgent *sgent; /* Pointer to SG list */
@@ -858,28 +859,30 @@ static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, stru
858 scb->lun = cmd->device->lun; 859 scb->lun = cmd->device->lun;
859 scb->reserved0 = 0; 860 scb->reserved0 = 0;
860 scb->reserved1 = 0; 861 scb->reserved1 = 0;
861 scb->sg_len = 0; 862 scb->sg_len = cpu_to_le32(0);
862 863
863 scb->xferlen = (u32) scsi_bufflen(cmd); 864 scb->xferlen = cpu_to_le32((u32) scsi_bufflen(cmd));
864 sgent = (struct orc_sgent *) & escb->sglist[0]; 865 sgent = (struct orc_sgent *) & escb->sglist[0];
865 866
866 count_sg = scsi_dma_map(cmd); 867 count_sg = scsi_dma_map(cmd);
867 BUG_ON(count_sg < 0); 868 if (count_sg < 0)
869 return count_sg;
870 BUG_ON(count_sg > TOTAL_SG_ENTRY);
868 871
869 /* Build the scatter gather lists */ 872 /* Build the scatter gather lists */
870 if (count_sg) { 873 if (count_sg) {
871 scb->sg_len = (u32) (count_sg * 8); 874 scb->sg_len = cpu_to_le32((u32) (count_sg * 8));
872 scsi_for_each_sg(cmd, sg, count_sg, i) { 875 scsi_for_each_sg(cmd, sg, count_sg, i) {
873 sgent->base = (u32) sg_dma_address(sg); 876 sgent->base = cpu_to_le32((u32) sg_dma_address(sg));
874 sgent->length = (u32) sg_dma_len(sg); 877 sgent->length = cpu_to_le32((u32) sg_dma_len(sg));
875 sgent++; 878 sgent++;
876 } 879 }
877 } else { 880 } else {
878 scb->sg_len = 0; 881 scb->sg_len = cpu_to_le32(0);
879 sgent->base = 0; 882 sgent->base = cpu_to_le32(0);
880 sgent->length = 0; 883 sgent->length = cpu_to_le32(0);
881 } 884 }
882 scb->sg_addr = (u32) scb->sense_addr; 885 scb->sg_addr = (u32) scb->sense_addr; /* sense_addr is already little endian */
883 scb->hastat = 0; 886 scb->hastat = 0;
884 scb->tastat = 0; 887 scb->tastat = 0;
885 scb->link = 0xFF; 888 scb->link = 0xFF;
@@ -896,6 +899,7 @@ static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, stru
896 scb->tag_msg = 0; /* No tag support */ 899 scb->tag_msg = 0; /* No tag support */
897 } 900 }
898 memcpy(scb->cdb, cmd->cmnd, scb->cdb_len); 901 memcpy(scb->cdb, cmd->cmnd, scb->cdb_len);
902 return 0;
899} 903}
900 904
901/** 905/**
@@ -919,7 +923,10 @@ static int inia100_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd
919 if ((scb = orc_alloc_scb(host)) == NULL) 923 if ((scb = orc_alloc_scb(host)) == NULL)
920 return SCSI_MLQUEUE_HOST_BUSY; 924 return SCSI_MLQUEUE_HOST_BUSY;
921 925
922 inia100_build_scb(host, scb, cmd); 926 if (inia100_build_scb(host, scb, cmd)) {
927 orc_release_scb(host, scb);
928 return SCSI_MLQUEUE_HOST_BUSY;
929 }
923 orc_exec_scb(host, scb); /* Start execute SCB */ 930 orc_exec_scb(host, scb); /* Start execute SCB */
924 return 0; 931 return 0;
925} 932}
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 5fd83deab36c..a7355260cfcf 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -41,6 +41,7 @@
41#include <linux/kthread.h> 41#include <linux/kthread.h>
42#include <linux/semaphore.h> 42#include <linux/semaphore.h>
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44#include <scsi/scsi_host.h>
44 45
45#include "aacraid.h" 46#include "aacraid.h"
46 47
@@ -581,6 +582,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
581 for (i = 0; i < upsg->count; i++) { 582 for (i = 0; i < upsg->count; i++) {
582 u64 addr; 583 u64 addr;
583 void* p; 584 void* p;
585 if (upsg->sg[i].count >
586 (dev->adapter_info.options &
587 AAC_OPT_NEW_COMM) ?
588 (dev->scsi_host_ptr->max_sectors << 9) :
589 65536) {
590 rcode = -EINVAL;
591 goto cleanup;
592 }
584 /* Does this really need to be GFP_DMA? */ 593 /* Does this really need to be GFP_DMA? */
585 p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA); 594 p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
586 if(!p) { 595 if(!p) {
@@ -625,6 +634,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
625 for (i = 0; i < usg->count; i++) { 634 for (i = 0; i < usg->count; i++) {
626 u64 addr; 635 u64 addr;
627 void* p; 636 void* p;
637 if (usg->sg[i].count >
638 (dev->adapter_info.options &
639 AAC_OPT_NEW_COMM) ?
640 (dev->scsi_host_ptr->max_sectors << 9) :
641 65536) {
642 rcode = -EINVAL;
643 goto cleanup;
644 }
628 /* Does this really need to be GFP_DMA? */ 645 /* Does this really need to be GFP_DMA? */
629 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); 646 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
630 if(!p) { 647 if(!p) {
@@ -667,6 +684,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
667 for (i = 0; i < upsg->count; i++) { 684 for (i = 0; i < upsg->count; i++) {
668 uintptr_t addr; 685 uintptr_t addr;
669 void* p; 686 void* p;
687 if (usg->sg[i].count >
688 (dev->adapter_info.options &
689 AAC_OPT_NEW_COMM) ?
690 (dev->scsi_host_ptr->max_sectors << 9) :
691 65536) {
692 rcode = -EINVAL;
693 goto cleanup;
694 }
670 /* Does this really need to be GFP_DMA? */ 695 /* Does this really need to be GFP_DMA? */
671 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); 696 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
672 if(!p) { 697 if(!p) {
@@ -698,6 +723,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
698 for (i = 0; i < upsg->count; i++) { 723 for (i = 0; i < upsg->count; i++) {
699 dma_addr_t addr; 724 dma_addr_t addr;
700 void* p; 725 void* p;
726 if (upsg->sg[i].count >
727 (dev->adapter_info.options &
728 AAC_OPT_NEW_COMM) ?
729 (dev->scsi_host_ptr->max_sectors << 9) :
730 65536) {
731 rcode = -EINVAL;
732 goto cleanup;
733 }
701 p = kmalloc(upsg->sg[i].count, GFP_KERNEL); 734 p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
702 if (!p) { 735 if (!p) {
703 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", 736 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 68c140e82673..9aa301c1ed07 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -865,7 +865,7 @@ static ssize_t aac_show_bios_version(struct device *device,
865 return len; 865 return len;
866} 866}
867 867
868ssize_t aac_show_serial_number(struct device *device, 868static ssize_t aac_show_serial_number(struct device *device,
869 struct device_attribute *attr, char *buf) 869 struct device_attribute *attr, char *buf)
870{ 870{
871 struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; 871 struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
new file mode 100644
index 000000000000..2adc0f666b68
--- /dev/null
+++ b/drivers/scsi/device_handler/Kconfig
@@ -0,0 +1,32 @@
1#
2# SCSI Device Handler configuration
3#
4
5menuconfig SCSI_DH
6 tristate "SCSI Device Handlers"
7 depends on SCSI
8 default n
9 help
10 SCSI Device Handlers provide device specific support for
11 devices utilized in multipath configurations. Say Y here to
12 select support for specific hardware.
13
14config SCSI_DH_RDAC
15 tristate "LSI RDAC Device Handler"
16 depends on SCSI_DH
17 help
18 If you have a LSI RDAC select y. Otherwise, say N.
19
20config SCSI_DH_HP_SW
21 tristate "HP/COMPAQ MSA Device Handler"
22 depends on SCSI_DH
23 help
24 If you have a HP/COMPAQ MSA device that requires START_STOP to
25 be sent to start it and cannot upgrade the firmware then select y.
26 Otherwise, say N.
27
28config SCSI_DH_EMC
29 tristate "EMC CLARiiON Device Handler"
30 depends on SCSI_DH
31 help
32 If you have a EMC CLARiiON select y. Otherwise, say N.
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
new file mode 100644
index 000000000000..35272e93b1c8
--- /dev/null
+++ b/drivers/scsi/device_handler/Makefile
@@ -0,0 +1,7 @@
1#
2# SCSI Device Handler
3#
4obj-$(CONFIG_SCSI_DH) += scsi_dh.o
5obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o
6obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o
7obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
new file mode 100644
index 000000000000..ab6c21cd9689
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -0,0 +1,162 @@
1/*
2 * SCSI device handler infrastruture.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2007
19 * Authors:
20 * Chandra Seetharaman <sekharan@us.ibm.com>
21 * Mike Anderson <andmike@linux.vnet.ibm.com>
22 */
23
24#include <scsi/scsi_dh.h>
25#include "../scsi_priv.h"
26
27static DEFINE_SPINLOCK(list_lock);
28static LIST_HEAD(scsi_dh_list);
29
30static struct scsi_device_handler *get_device_handler(const char *name)
31{
32 struct scsi_device_handler *tmp, *found = NULL;
33
34 spin_lock(&list_lock);
35 list_for_each_entry(tmp, &scsi_dh_list, list) {
36 if (!strcmp(tmp->name, name)) {
37 found = tmp;
38 break;
39 }
40 }
41 spin_unlock(&list_lock);
42 return found;
43}
44
45static int scsi_dh_notifier_add(struct device *dev, void *data)
46{
47 struct scsi_device_handler *scsi_dh = data;
48
49 scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_ADD_DEVICE, dev);
50 return 0;
51}
52
53/*
54 * scsi_register_device_handler - register a device handler personality
55 * module.
56 * @scsi_dh - device handler to be registered.
57 *
58 * Returns 0 on success, -EBUSY if handler already registered.
59 */
60int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
61{
62 int ret = -EBUSY;
63 struct scsi_device_handler *tmp;
64
65 tmp = get_device_handler(scsi_dh->name);
66 if (tmp)
67 goto done;
68
69 ret = bus_register_notifier(&scsi_bus_type, &scsi_dh->nb);
70
71 bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
72 spin_lock(&list_lock);
73 list_add(&scsi_dh->list, &scsi_dh_list);
74 spin_unlock(&list_lock);
75
76done:
77 return ret;
78}
79EXPORT_SYMBOL_GPL(scsi_register_device_handler);
80
81static int scsi_dh_notifier_remove(struct device *dev, void *data)
82{
83 struct scsi_device_handler *scsi_dh = data;
84
85 scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_DEL_DEVICE, dev);
86 return 0;
87}
88
89/*
90 * scsi_unregister_device_handler - register a device handler personality
91 * module.
92 * @scsi_dh - device handler to be unregistered.
93 *
94 * Returns 0 on success, -ENODEV if handler not registered.
95 */
96int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
97{
98 int ret = -ENODEV;
99 struct scsi_device_handler *tmp;
100
101 tmp = get_device_handler(scsi_dh->name);
102 if (!tmp)
103 goto done;
104
105 ret = bus_unregister_notifier(&scsi_bus_type, &scsi_dh->nb);
106
107 bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
108 scsi_dh_notifier_remove);
109 spin_lock(&list_lock);
110 list_del(&scsi_dh->list);
111 spin_unlock(&list_lock);
112
113done:
114 return ret;
115}
116EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
117
118/*
119 * scsi_dh_activate - activate the path associated with the scsi_device
120 * corresponding to the given request queue.
121 * @q - Request queue that is associated with the scsi_device to be
122 * activated.
123 */
124int scsi_dh_activate(struct request_queue *q)
125{
126 int err = 0;
127 unsigned long flags;
128 struct scsi_device *sdev;
129 struct scsi_device_handler *scsi_dh = NULL;
130
131 spin_lock_irqsave(q->queue_lock, flags);
132 sdev = q->queuedata;
133 if (sdev && sdev->scsi_dh_data)
134 scsi_dh = sdev->scsi_dh_data->scsi_dh;
135 if (!scsi_dh || !get_device(&sdev->sdev_gendev))
136 err = SCSI_DH_NOSYS;
137 spin_unlock_irqrestore(q->queue_lock, flags);
138
139 if (err)
140 return err;
141
142 if (scsi_dh->activate)
143 err = scsi_dh->activate(sdev);
144 put_device(&sdev->sdev_gendev);
145 return err;
146}
147EXPORT_SYMBOL_GPL(scsi_dh_activate);
148
149/*
150 * scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for
151 * the given name. FALSE(0) otherwise.
152 * @name - name of the device handler.
153 */
154int scsi_dh_handler_exist(const char *name)
155{
156 return (get_device_handler(name) != NULL);
157}
158EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
159
160MODULE_DESCRIPTION("SCSI device handler");
161MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
162MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
new file mode 100644
index 000000000000..f2467e936e55
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -0,0 +1,504 @@
1/*
2 * Target driver for EMC CLARiiON AX/CX-series hardware.
3 * Based on code from Lars Marowsky-Bree <lmb@suse.de>
4 * and Ed Goggin <egoggin@emc.com>.
5 *
6 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
7 * Copyright (C) 2006 Mike Christie
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, write to
21 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23#include <scsi/scsi.h>
24#include <scsi/scsi_eh.h>
25#include <scsi/scsi_dh.h>
26#include <scsi/scsi_device.h>
27
28#define CLARIION_NAME "emc_clariion"
29
30#define CLARIION_TRESPASS_PAGE 0x22
31#define CLARIION_BUFFER_SIZE 0x80
32#define CLARIION_TIMEOUT (60 * HZ)
33#define CLARIION_RETRIES 3
34#define CLARIION_UNBOUND_LU -1
35
36static unsigned char long_trespass[] = {
37 0, 0, 0, 0,
38 CLARIION_TRESPASS_PAGE, /* Page code */
39 0x09, /* Page length - 2 */
40 0x81, /* Trespass code + Honor reservation bit */
41 0xff, 0xff, /* Trespass target */
42 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
43};
44
45static unsigned char long_trespass_hr[] = {
46 0, 0, 0, 0,
47 CLARIION_TRESPASS_PAGE, /* Page code */
48 0x09, /* Page length - 2 */
49 0x01, /* Trespass code + Honor reservation bit */
50 0xff, 0xff, /* Trespass target */
51 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
52};
53
54static unsigned char short_trespass[] = {
55 0, 0, 0, 0,
56 CLARIION_TRESPASS_PAGE, /* Page code */
57 0x02, /* Page length - 2 */
58 0x81, /* Trespass code + Honor reservation bit */
59 0xff, /* Trespass target */
60};
61
62static unsigned char short_trespass_hr[] = {
63 0, 0, 0, 0,
64 CLARIION_TRESPASS_PAGE, /* Page code */
65 0x02, /* Page length - 2 */
66 0x01, /* Trespass code + Honor reservation bit */
67 0xff, /* Trespass target */
68};
69
70struct clariion_dh_data {
71 /*
72 * Use short trespass command (FC-series) or the long version
73 * (default for AX/CX CLARiiON arrays).
74 */
75 unsigned short_trespass;
76 /*
77 * Whether or not (default) to honor SCSI reservations when
78 * initiating a switch-over.
79 */
80 unsigned hr;
81 /* I/O buffer for both MODE_SELECT and INQUIRY commands. */
82 char buffer[CLARIION_BUFFER_SIZE];
83 /*
84 * SCSI sense buffer for commands -- assumes serial issuance
85 * and completion sequence of all commands for same multipath.
86 */
87 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
88 /* which SP (A=0,B=1,UNBOUND=-1) is dflt SP for path's mapped dev */
89 int default_sp;
90 /* which SP (A=0,B=1,UNBOUND=-1) is active for path's mapped dev */
91 int current_sp;
92};
93
94static inline struct clariion_dh_data
95 *get_clariion_data(struct scsi_device *sdev)
96{
97 struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
98 BUG_ON(scsi_dh_data == NULL);
99 return ((struct clariion_dh_data *) scsi_dh_data->buf);
100}
101
102/*
103 * Parse MODE_SELECT cmd reply.
104 */
105static int trespass_endio(struct scsi_device *sdev, int result)
106{
107 int err = SCSI_DH_OK;
108 struct scsi_sense_hdr sshdr;
109 struct clariion_dh_data *csdev = get_clariion_data(sdev);
110 char *sense = csdev->sense;
111
112 if (status_byte(result) == CHECK_CONDITION &&
113 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
114 sdev_printk(KERN_ERR, sdev, "Found valid sense data 0x%2x, "
115 "0x%2x, 0x%2x while sending CLARiiON trespass "
116 "command.\n", sshdr.sense_key, sshdr.asc,
117 sshdr.ascq);
118
119 if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
120 (sshdr.ascq == 0x00)) {
121 /*
122 * Array based copy in progress -- do not send
123 * mode_select or copy will be aborted mid-stream.
124 */
125 sdev_printk(KERN_INFO, sdev, "Array Based Copy in "
126 "progress while sending CLARiiON trespass "
127 "command.\n");
128 err = SCSI_DH_DEV_TEMP_BUSY;
129 } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
130 (sshdr.ascq == 0x03)) {
131 /*
132 * LUN Not Ready - Manual Intervention Required
133 * indicates in-progress ucode upgrade (NDU).
134 */
135 sdev_printk(KERN_INFO, sdev, "Detected in-progress "
136 "ucode upgrade NDU operation while sending "
137 "CLARiiON trespass command.\n");
138 err = SCSI_DH_DEV_TEMP_BUSY;
139 } else
140 err = SCSI_DH_DEV_FAILED;
141 } else if (result) {
142 sdev_printk(KERN_ERR, sdev, "Error 0x%x while sending "
143 "CLARiiON trespass command.\n", result);
144 err = SCSI_DH_IO;
145 }
146
147 return err;
148}
149
150static int parse_sp_info_reply(struct scsi_device *sdev, int result,
151 int *default_sp, int *current_sp, int *new_current_sp)
152{
153 int err = SCSI_DH_OK;
154 struct clariion_dh_data *csdev = get_clariion_data(sdev);
155
156 if (result == 0) {
157 /* check for in-progress ucode upgrade (NDU) */
158 if (csdev->buffer[48] != 0) {
159 sdev_printk(KERN_NOTICE, sdev, "Detected in-progress "
160 "ucode upgrade NDU operation while finding "
161 "current active SP.");
162 err = SCSI_DH_DEV_TEMP_BUSY;
163 } else {
164 *default_sp = csdev->buffer[5];
165
166 if (csdev->buffer[4] == 2)
167 /* SP for path is current */
168 *current_sp = csdev->buffer[8];
169 else {
170 if (csdev->buffer[4] == 1)
171 /* SP for this path is NOT current */
172 if (csdev->buffer[8] == 0)
173 *current_sp = 1;
174 else
175 *current_sp = 0;
176 else
177 /* unbound LU or LUNZ */
178 *current_sp = CLARIION_UNBOUND_LU;
179 }
180 *new_current_sp = csdev->buffer[8];
181 }
182 } else {
183 struct scsi_sense_hdr sshdr;
184
185 err = SCSI_DH_IO;
186
187 if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
188 &sshdr))
189 sdev_printk(KERN_ERR, sdev, "Found valid sense data "
190 "0x%2x, 0x%2x, 0x%2x while finding current "
191 "active SP.", sshdr.sense_key, sshdr.asc,
192 sshdr.ascq);
193 else
194 sdev_printk(KERN_ERR, sdev, "Error 0x%x finding "
195 "current active SP.", result);
196 }
197
198 return err;
199}
200
201static int sp_info_endio(struct scsi_device *sdev, int result,
202 int mode_select_sent, int *done)
203{
204 struct clariion_dh_data *csdev = get_clariion_data(sdev);
205 int err_flags, default_sp, current_sp, new_current_sp;
206
207 err_flags = parse_sp_info_reply(sdev, result, &default_sp,
208 &current_sp, &new_current_sp);
209
210 if (err_flags != SCSI_DH_OK)
211 goto done;
212
213 if (mode_select_sent) {
214 csdev->default_sp = default_sp;
215 csdev->current_sp = current_sp;
216 } else {
217 /*
218 * Issue the actual module_selec request IFF either
219 * (1) we do not know the identity of the current SP OR
220 * (2) what we think we know is actually correct.
221 */
222 if ((current_sp != CLARIION_UNBOUND_LU) &&
223 (new_current_sp != current_sp)) {
224
225 csdev->default_sp = default_sp;
226 csdev->current_sp = current_sp;
227
228 sdev_printk(KERN_INFO, sdev, "Ignoring path group "
229 "switch-over command for CLARiiON SP%s since "
230 " mapped device is already initialized.",
231 current_sp ? "B" : "A");
232 if (done)
233 *done = 1; /* as good as doing it */
234 }
235 }
236done:
237 return err_flags;
238}
239
240/*
241* Get block request for REQ_BLOCK_PC command issued to path. Currently
242* limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
243*
244* Uses data and sense buffers in hardware handler context structure and
245* assumes serial servicing of commands, both issuance and completion.
246*/
247static struct request *get_req(struct scsi_device *sdev, int cmd)
248{
249 struct clariion_dh_data *csdev = get_clariion_data(sdev);
250 struct request *rq;
251 unsigned char *page22;
252 int len = 0;
253
254 rq = blk_get_request(sdev->request_queue,
255 (cmd == MODE_SELECT) ? WRITE : READ, GFP_ATOMIC);
256 if (!rq) {
257 sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
258 return NULL;
259 }
260
261 memset(&rq->cmd, 0, BLK_MAX_CDB);
262 rq->cmd[0] = cmd;
263 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
264
265 switch (cmd) {
266 case MODE_SELECT:
267 if (csdev->short_trespass) {
268 page22 = csdev->hr ? short_trespass_hr : short_trespass;
269 len = sizeof(short_trespass);
270 } else {
271 page22 = csdev->hr ? long_trespass_hr : long_trespass;
272 len = sizeof(long_trespass);
273 }
274 /*
275 * Can't DMA from kernel BSS -- must copy selected trespass
276 * command mode page contents to context buffer which is
277 * allocated by kmalloc.
278 */
279 BUG_ON((len > CLARIION_BUFFER_SIZE));
280 memcpy(csdev->buffer, page22, len);
281 rq->cmd_flags |= REQ_RW;
282 rq->cmd[1] = 0x10;
283 break;
284 case INQUIRY:
285 rq->cmd[1] = 0x1;
286 rq->cmd[2] = 0xC0;
287 len = CLARIION_BUFFER_SIZE;
288 memset(csdev->buffer, 0, CLARIION_BUFFER_SIZE);
289 break;
290 default:
291 BUG_ON(1);
292 break;
293 }
294
295 rq->cmd[4] = len;
296 rq->cmd_type = REQ_TYPE_BLOCK_PC;
297 rq->cmd_flags |= REQ_FAILFAST;
298 rq->timeout = CLARIION_TIMEOUT;
299 rq->retries = CLARIION_RETRIES;
300
301 rq->sense = csdev->sense;
302 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
303 rq->sense_len = 0;
304
305 if (blk_rq_map_kern(sdev->request_queue, rq, csdev->buffer,
306 len, GFP_ATOMIC)) {
307 __blk_put_request(rq->q, rq);
308 return NULL;
309 }
310
311 return rq;
312}
313
314static int send_cmd(struct scsi_device *sdev, int cmd)
315{
316 struct request *rq = get_req(sdev, cmd);
317
318 if (!rq)
319 return SCSI_DH_RES_TEMP_UNAVAIL;
320
321 return blk_execute_rq(sdev->request_queue, NULL, rq, 1);
322}
323
324static int clariion_activate(struct scsi_device *sdev)
325{
326 int result, done = 0;
327
328 result = send_cmd(sdev, INQUIRY);
329 result = sp_info_endio(sdev, result, 0, &done);
330 if (result || done)
331 goto done;
332
333 result = send_cmd(sdev, MODE_SELECT);
334 result = trespass_endio(sdev, result);
335 if (result)
336 goto done;
337
338 result = send_cmd(sdev, INQUIRY);
339 result = sp_info_endio(sdev, result, 1, NULL);
340done:
341 return result;
342}
343
344static int clariion_check_sense(struct scsi_device *sdev,
345 struct scsi_sense_hdr *sense_hdr)
346{
347 switch (sense_hdr->sense_key) {
348 case NOT_READY:
349 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x03)
350 /*
351 * LUN Not Ready - Manual Intervention Required
352 * indicates this is a passive path.
353 *
354 * FIXME: However, if this is seen and EVPD C0
355 * indicates that this is due to a NDU in
356 * progress, we should set FAIL_PATH too.
357 * This indicates we might have to do a SCSI
358 * inquiry in the end_io path. Ugh.
359 *
360 * Can return FAILED only when we want the error
361 * recovery process to kick in.
362 */
363 return SUCCESS;
364 break;
365 case ILLEGAL_REQUEST:
366 if (sense_hdr->asc == 0x25 && sense_hdr->ascq == 0x01)
367 /*
368 * An array based copy is in progress. Do not
369 * fail the path, do not bypass to another PG,
370 * do not retry. Fail the IO immediately.
371 * (Actually this is the same conclusion as in
372 * the default handler, but lets make sure.)
373 *
374 * Can return FAILED only when we want the error
375 * recovery process to kick in.
376 */
377 return SUCCESS;
378 break;
379 case UNIT_ATTENTION:
380 if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
381 /*
382 * Unit Attention Code. This is the first IO
383 * to the new path, so just retry.
384 */
385 return NEEDS_RETRY;
386 break;
387 }
388
389 /* success just means we do not care what scsi-ml does */
390 return SUCCESS;
391}
392
393static const struct {
394 char *vendor;
395 char *model;
396} clariion_dev_list[] = {
397 {"DGC", "RAID"},
398 {"DGC", "DISK"},
399 {NULL, NULL},
400};
401
402static int clariion_bus_notify(struct notifier_block *, unsigned long, void *);
403
404static struct scsi_device_handler clariion_dh = {
405 .name = CLARIION_NAME,
406 .module = THIS_MODULE,
407 .nb.notifier_call = clariion_bus_notify,
408 .check_sense = clariion_check_sense,
409 .activate = clariion_activate,
410};
411
412/*
413 * TODO: need some interface so we can set trespass values
414 */
415static int clariion_bus_notify(struct notifier_block *nb,
416 unsigned long action, void *data)
417{
418 struct device *dev = data;
419 struct scsi_device *sdev;
420 struct scsi_dh_data *scsi_dh_data;
421 struct clariion_dh_data *h;
422 int i, found = 0;
423 unsigned long flags;
424
425 if (!scsi_is_sdev_device(dev))
426 return 0;
427
428 sdev = to_scsi_device(dev);
429
430 if (action == BUS_NOTIFY_ADD_DEVICE) {
431 for (i = 0; clariion_dev_list[i].vendor; i++) {
432 if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
433 strlen(clariion_dev_list[i].vendor)) &&
434 !strncmp(sdev->model, clariion_dev_list[i].model,
435 strlen(clariion_dev_list[i].model))) {
436 found = 1;
437 break;
438 }
439 }
440 if (!found)
441 goto out;
442
443 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
444 + sizeof(*h) , GFP_KERNEL);
445 if (!scsi_dh_data) {
446 sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
447 CLARIION_NAME);
448 goto out;
449 }
450
451 scsi_dh_data->scsi_dh = &clariion_dh;
452 h = (struct clariion_dh_data *) scsi_dh_data->buf;
453 h->default_sp = CLARIION_UNBOUND_LU;
454 h->current_sp = CLARIION_UNBOUND_LU;
455
456 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
457 sdev->scsi_dh_data = scsi_dh_data;
458 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
459
460 sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", CLARIION_NAME);
461 try_module_get(THIS_MODULE);
462
463 } else if (action == BUS_NOTIFY_DEL_DEVICE) {
464 if (sdev->scsi_dh_data == NULL ||
465 sdev->scsi_dh_data->scsi_dh != &clariion_dh)
466 goto out;
467
468 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
469 scsi_dh_data = sdev->scsi_dh_data;
470 sdev->scsi_dh_data = NULL;
471 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
472
473 sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n",
474 CLARIION_NAME);
475
476 kfree(scsi_dh_data);
477 module_put(THIS_MODULE);
478 }
479
480out:
481 return 0;
482}
483
484static int __init clariion_init(void)
485{
486 int r;
487
488 r = scsi_register_device_handler(&clariion_dh);
489 if (r != 0)
490 printk(KERN_ERR "Failed to register scsi device handler.");
491 return r;
492}
493
494static void __exit clariion_exit(void)
495{
496 scsi_unregister_device_handler(&clariion_dh);
497}
498
499module_init(clariion_init);
500module_exit(clariion_exit);
501
502MODULE_DESCRIPTION("EMC CX/AX/FC-family driver");
503MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, Chandra Seetharaman <sekharan@us.ibm.com>");
504MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
new file mode 100644
index 000000000000..ae6be87d6a83
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -0,0 +1,207 @@
1/*
2 * Basic HP/COMPAQ MSA 1000 support. This is only needed if your HW cannot be
3 * upgraded.
4 *
5 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
6 * Copyright (C) 2006 Mike Christie
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <scsi/scsi.h>
24#include <scsi/scsi_dbg.h>
25#include <scsi/scsi_eh.h>
26#include <scsi/scsi_dh.h>
27
28#define HP_SW_NAME "hp_sw"
29
30#define HP_SW_TIMEOUT (60 * HZ)
31#define HP_SW_RETRIES 3
32
/*
 * Per-device state for the HP/COMPAQ MSA handler, stored in the
 * flexible buf[] tail of struct scsi_dh_data.
 */
struct hp_sw_dh_data {
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];	/* sense of last START_STOP */
	int retries;					/* consecutive retry count */
};
37
38static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
39{
40 struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
41 BUG_ON(scsi_dh_data == NULL);
42 return ((struct hp_sw_dh_data *) scsi_dh_data->buf);
43}
44
/*
 * hp_sw_done - evaluate the result of the START_STOP command.
 *
 * Decodes the sense data captured in h->sense and maps it to a
 * SCSI_DH_* result, tracking consecutive retries so we eventually
 * give up with SCSI_DH_IO after HP_SW_RETRIES attempts.
 */
static int hp_sw_done(struct scsi_device *sdev)
{
	struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
	struct scsi_sense_hdr sshdr;
	int rc;

	sdev_printk(KERN_INFO, sdev, "hp_sw_done\n");

	/* scsi_normalize_sense returns 0 when there is no valid sense;
	 * in that case rc == 0 (== SCSI_DH_OK, presumably) falls through
	 * to the retry-counter reset below. */
	rc = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
	if (!rc)
		goto done;
	switch (sshdr.sense_key) {
	case NOT_READY:
		/* 04/03: manual intervention required — back off and retry */
		if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
			rc = SCSI_DH_RETRY;
			h->retries++;
			break;
		}
		/* fall through */
	default:
		/* anything else: retry immediately */
		h->retries++;
		rc = SCSI_DH_IMM_RETRY;
	}

done:
	if (rc == SCSI_DH_OK || rc == SCSI_DH_IO)
		h->retries = 0;
	else if (h->retries > HP_SW_RETRIES) {
		/* too many consecutive failures: report hard I/O error */
		h->retries = 0;
		rc = SCSI_DH_IO;
	}
	return rc;
}
78
79static int hp_sw_activate(struct scsi_device *sdev)
80{
81 struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
82 struct request *req;
83 int ret = SCSI_DH_RES_TEMP_UNAVAIL;
84
85 req = blk_get_request(sdev->request_queue, WRITE, GFP_ATOMIC);
86 if (!req)
87 goto done;
88
89 sdev_printk(KERN_INFO, sdev, "sending START_STOP.");
90
91 req->cmd_type = REQ_TYPE_BLOCK_PC;
92 req->cmd_flags |= REQ_FAILFAST;
93 req->cmd_len = COMMAND_SIZE(START_STOP);
94 memset(req->cmd, 0, MAX_COMMAND_SIZE);
95 req->cmd[0] = START_STOP;
96 req->cmd[4] = 1; /* Start spin cycle */
97 req->timeout = HP_SW_TIMEOUT;
98 req->sense = h->sense;
99 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
100 req->sense_len = 0;
101
102 ret = blk_execute_rq(req->q, NULL, req, 1);
103 if (!ret) /* SUCCESS */
104 ret = hp_sw_done(sdev);
105 else
106 ret = SCSI_DH_IO;
107done:
108 return ret;
109}
110
/*
 * Vendor/model prefixes of arrays that need this handler.
 * Matched by strncmp() prefix comparison in hp_sw_bus_notify().
 * The list is terminated by a NULL vendor entry.
 */
static const struct {
	char *vendor;
	char *model;
} hp_sw_dh_data_list[] = {
	{"COMPAQ", "MSA"},
	{"HP", "HSV"},
	{"DEC", "HSG80"},
	{NULL, NULL},
};
120
121static int hp_sw_bus_notify(struct notifier_block *, unsigned long, void *);
122
123static struct scsi_device_handler hp_sw_dh = {
124 .name = HP_SW_NAME,
125 .module = THIS_MODULE,
126 .nb.notifier_call = hp_sw_bus_notify,
127 .activate = hp_sw_activate,
128};
129
130static int hp_sw_bus_notify(struct notifier_block *nb,
131 unsigned long action, void *data)
132{
133 struct device *dev = data;
134 struct scsi_device *sdev;
135 struct scsi_dh_data *scsi_dh_data;
136 int i, found = 0;
137 unsigned long flags;
138
139 if (!scsi_is_sdev_device(dev))
140 return 0;
141
142 sdev = to_scsi_device(dev);
143
144 if (action == BUS_NOTIFY_ADD_DEVICE) {
145 for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
146 if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
147 strlen(hp_sw_dh_data_list[i].vendor)) &&
148 !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
149 strlen(hp_sw_dh_data_list[i].model))) {
150 found = 1;
151 break;
152 }
153 }
154 if (!found)
155 goto out;
156
157 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
158 + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
159 if (!scsi_dh_data) {
160 sdev_printk(KERN_ERR, sdev, "Attach Failed %s.\n",
161 HP_SW_NAME);
162 goto out;
163 }
164
165 scsi_dh_data->scsi_dh = &hp_sw_dh;
166 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
167 sdev->scsi_dh_data = scsi_dh_data;
168 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
169 try_module_get(THIS_MODULE);
170
171 sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", HP_SW_NAME);
172 } else if (action == BUS_NOTIFY_DEL_DEVICE) {
173 if (sdev->scsi_dh_data == NULL ||
174 sdev->scsi_dh_data->scsi_dh != &hp_sw_dh)
175 goto out;
176
177 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
178 scsi_dh_data = sdev->scsi_dh_data;
179 sdev->scsi_dh_data = NULL;
180 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
181 module_put(THIS_MODULE);
182
183 sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", HP_SW_NAME);
184
185 kfree(scsi_dh_data);
186 }
187
188out:
189 return 0;
190}
191
/*
 * hp_sw_init - module entry point; registers the handler.
 */
static int __init hp_sw_init(void)
{
	return scsi_register_device_handler(&hp_sw_dh);
}
196
/*
 * hp_sw_exit - module exit point; unregisters the handler.
 */
static void __exit hp_sw_exit(void)
{
	scsi_unregister_device_handler(&hp_sw_dh);
}
201
202module_init(hp_sw_init);
203module_exit(hp_sw_exit);
204
205MODULE_DESCRIPTION("HP MSA 1000");
206MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu");
207MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
new file mode 100644
index 000000000000..fdf34b0ec6e1
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -0,0 +1,696 @@
1/*
2 * Engenio/LSI RDAC SCSI Device Handler
3 *
4 * Copyright (C) 2005 Mike Christie. All rights reserved.
5 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 */
22#include <scsi/scsi.h>
23#include <scsi/scsi_eh.h>
24#include <scsi/scsi_dh.h>
25
#define RDAC_NAME "rdac"

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
/*
 * Fix: drop the stray trailing semicolon.  It made every use of the
 * macro expand to "20;", which happens to compile in the one current
 * assignment context but breaks the macro in any expression context.
 */
#define RDAC_QUIESCENCE_TIME 20
/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller modes definitions
 */
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_TIMEOUT (60 * HZ)
#define RDAC_RETRIES 3
53
54struct rdac_mode_6_hdr {
55 u8 data_len;
56 u8 medium_type;
57 u8 device_params;
58 u8 block_desc_len;
59};
60
61struct rdac_mode_10_hdr {
62 u16 data_len;
63 u8 medium_type;
64 u8 device_params;
65 u16 reserved;
66 u16 block_desc_len;
67};
68
69struct rdac_mode_common {
70 u8 controller_serial[16];
71 u8 alt_controller_serial[16];
72 u8 rdac_mode[2];
73 u8 alt_rdac_mode[2];
74 u8 quiescence_timeout;
75 u8 rdac_options;
76};
77
78struct rdac_pg_legacy {
79 struct rdac_mode_6_hdr hdr;
80 u8 page_code;
81 u8 page_len;
82 struct rdac_mode_common common;
83#define MODE6_MAX_LUN 32
84 u8 lun_table[MODE6_MAX_LUN];
85 u8 reserved2[32];
86 u8 reserved3;
87 u8 reserved4;
88};
89
90struct rdac_pg_expanded {
91 struct rdac_mode_10_hdr hdr;
92 u8 page_code;
93 u8 subpage_code;
94 u8 page_len[2];
95 struct rdac_mode_common common;
96 u8 lun_table[256];
97 u8 reserved3;
98 u8 reserved4;
99};
100
101struct c9_inquiry {
102 u8 peripheral_info;
103 u8 page_code; /* 0xC9 */
104 u8 reserved1;
105 u8 page_len;
106 u8 page_id[4]; /* "vace" */
107 u8 avte_cvp;
108 u8 path_prio;
109 u8 reserved2[38];
110};
111
112#define SUBSYS_ID_LEN 16
113#define SLOT_ID_LEN 2
114
115struct c4_inquiry {
116 u8 peripheral_info;
117 u8 page_code; /* 0xC4 */
118 u8 reserved1;
119 u8 page_len;
120 u8 page_id[4]; /* "subs" */
121 u8 subsys_id[SUBSYS_ID_LEN];
122 u8 revision[4];
123 u8 slot_id[SLOT_ID_LEN];
124 u8 reserved[2];
125};
126
127struct rdac_controller {
128 u8 subsys_id[SUBSYS_ID_LEN];
129 u8 slot_id[SLOT_ID_LEN];
130 int use_ms10;
131 struct kref kref;
132 struct list_head node; /* list of all controllers */
133 union {
134 struct rdac_pg_legacy legacy;
135 struct rdac_pg_expanded expanded;
136 } mode_select;
137};
138struct c8_inquiry {
139 u8 peripheral_info;
140 u8 page_code; /* 0xC8 */
141 u8 reserved1;
142 u8 page_len;
143 u8 page_id[4]; /* "edid" */
144 u8 reserved2[3];
145 u8 vol_uniq_id_len;
146 u8 vol_uniq_id[16];
147 u8 vol_user_label_len;
148 u8 vol_user_label[60];
149 u8 array_uniq_id_len;
150 u8 array_unique_id[16];
151 u8 array_user_label_len;
152 u8 array_user_label[60];
153 u8 lun[8];
154};
155
156struct c2_inquiry {
157 u8 peripheral_info;
158 u8 page_code; /* 0xC2 */
159 u8 reserved1;
160 u8 page_len;
161 u8 page_id[4]; /* "swr4" */
162 u8 sw_version[3];
163 u8 sw_date[3];
164 u8 features_enabled;
165 u8 max_lun_supported;
166 u8 partitions[239]; /* Total allocation length should be 0xFF */
167};
168
169struct rdac_dh_data {
170 struct rdac_controller *ctlr;
171#define UNINITIALIZED_LUN (1 << 8)
172 unsigned lun;
173#define RDAC_STATE_ACTIVE 0
174#define RDAC_STATE_PASSIVE 1
175 unsigned char state;
176 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
177 union {
178 struct c2_inquiry c2;
179 struct c4_inquiry c4;
180 struct c8_inquiry c8;
181 struct c9_inquiry c9;
182 } inq;
183};
184
185static LIST_HEAD(ctlr_list);
186static DEFINE_SPINLOCK(list_lock);
187
188static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
189{
190 struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
191 BUG_ON(scsi_dh_data == NULL);
192 return ((struct rdac_dh_data *) scsi_dh_data->buf);
193}
194
/*
 * get_rdac_req - allocate and prime a block-layer packet request.
 * @sdev:   target device
 * @buffer: data buffer to map into the request (may be unused if @buflen==0)
 * @buflen: length of @buffer in bytes
 * @rw:     READ or WRITE
 *
 * Returns a request with sense buffer, retries and timeout set up,
 * or NULL on failure.  The caller fills in the CDB and executes it;
 * the caller also owns the request reference.
 */
static struct request *get_rdac_req(struct scsi_device *sdev,
			void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;
	struct rdac_dh_data *h = get_rdac_data(sdev);

	rq = blk_get_request(q, rw, GFP_KERNEL);

	if (!rq) {
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_get_request failed.\n");
		return NULL;
	}

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
		blk_put_request(rq);
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_rq_map_kern failed.\n");
		return NULL;
	}

	/* clear the CDB; sense data lands in the per-device buffer */
	memset(&rq->cmd, 0, BLK_MAX_CDB);
	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
	rq->retries = RDAC_RETRIES;
	rq->timeout = RDAC_TIMEOUT;

	return rq;
}
229
/*
 * rdac_failover_get - build the MODE_SELECT request that transfers
 * ownership of this LUN to the current controller.
 *
 * Fills the controller's mode_select union (legacy 6-byte or expanded
 * 10-byte page, depending on ctlr->use_ms10) and wraps it in a WRITE
 * packet request.  Returns NULL if the request cannot be allocated.
 */
static struct request *rdac_failover_get(struct scsi_device *sdev)
{
	struct request *rq;
	struct rdac_mode_common *common;
	unsigned data_size;
	struct rdac_dh_data *h = get_rdac_data(sdev);

	if (h->ctlr->use_ms10) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &h->ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		/* 0x40 marks the sub-page format of the redundant-controller page */
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
		/* 0x81: transfer this LUN to the controller handling the command */
		rdac_pg->lun_table[h->lun] = 0x81;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &h->ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
		rdac_pg->lun_table[h->lun] = 0x81;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESENCE;

	/* get request for block layer packet command */
	rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
	if (!rq)
		return NULL;

	/* Prepare the command. */
	if (h->ctlr->use_ms10) {
		rq->cmd[0] = MODE_SELECT_10;
		rq->cmd[7] = data_size >> 8;
		rq->cmd[8] = data_size & 0xff;
	} else {
		rq->cmd[0] = MODE_SELECT;
		rq->cmd[4] = data_size;
	}
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	return rq;
}
282
283static void release_controller(struct kref *kref)
284{
285 struct rdac_controller *ctlr;
286 ctlr = container_of(kref, struct rdac_controller, kref);
287
288 spin_lock(&list_lock);
289 list_del(&ctlr->node);
290 spin_unlock(&list_lock);
291 kfree(ctlr);
292}
293
/*
 * get_controller - look up or create the controller object matching
 * @subsys_id/@slot_id.
 *
 * Takes a reference on an existing match; otherwise allocates a new
 * controller (GFP_ATOMIC — we hold list_lock), initializes it with
 * use_ms10 == -1 (mode-select width not yet probed) and links it into
 * ctlr_list.  Returns NULL on allocation failure.
 */
static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
{
	struct rdac_controller *ctlr, *tmp;

	spin_lock(&list_lock);

	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
			  (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
			kref_get(&tmp->kref);
			spin_unlock(&list_lock);
			return tmp;
		}
	}
	/* ATOMIC: allocated while holding list_lock */
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		goto done;

	/* initialize fields of controller */
	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
	kref_init(&ctlr->kref);
	ctlr->use_ms10 = -1;
	list_add(&ctlr->node, &ctlr_list);
done:
	spin_unlock(&list_lock);
	return ctlr;
}
322
323static int submit_inquiry(struct scsi_device *sdev, int page_code,
324 unsigned int len)
325{
326 struct request *rq;
327 struct request_queue *q = sdev->request_queue;
328 struct rdac_dh_data *h = get_rdac_data(sdev);
329 int err = SCSI_DH_RES_TEMP_UNAVAIL;
330
331 rq = get_rdac_req(sdev, &h->inq, len, READ);
332 if (!rq)
333 goto done;
334
335 /* Prepare the command. */
336 rq->cmd[0] = INQUIRY;
337 rq->cmd[1] = 1;
338 rq->cmd[2] = page_code;
339 rq->cmd[4] = len;
340 rq->cmd_len = COMMAND_SIZE(INQUIRY);
341 err = blk_execute_rq(q, NULL, rq, 1);
342 if (err == -EIO)
343 err = SCSI_DH_IO;
344done:
345 return err;
346}
347
348static int get_lun(struct scsi_device *sdev)
349{
350 int err;
351 struct c8_inquiry *inqp;
352 struct rdac_dh_data *h = get_rdac_data(sdev);
353
354 err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry));
355 if (err == SCSI_DH_OK) {
356 inqp = &h->inq.c8;
357 h->lun = inqp->lun[7]; /* currently it uses only one byte */
358 }
359 return err;
360}
361
362#define RDAC_OWNED 0
363#define RDAC_UNOWNED 1
364#define RDAC_FAILED 2
/*
 * check_ownership - determine whether this path's controller owns
 * the LUN, via the 0xC9 VPD inquiry page.
 *
 * Returns RDAC_OWNED, RDAC_UNOWNED, or RDAC_FAILED (inquiry failed).
 */
static int check_ownership(struct scsi_device *sdev)
{
	int err;
	struct c9_inquiry *inqp;
	struct rdac_dh_data *h = get_rdac_data(sdev);

	err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry));
	if (err == SCSI_DH_OK) {
		err = RDAC_UNOWNED;
		inqp = &h->inq.c9;
		/*
		 * If in AVT mode or if the path already owns the LUN,
		 * return RDAC_OWNED;
		 */
		/* bit 7 of avte_cvp: AVT mode; bit 0: current owner */
		if (((inqp->avte_cvp >> 7) == 0x1) ||
				((inqp->avte_cvp & 0x1) != 0))
			err = RDAC_OWNED;
	} else
		err = RDAC_FAILED;
	return err;
}
386
387static int initialize_controller(struct scsi_device *sdev)
388{
389 int err;
390 struct c4_inquiry *inqp;
391 struct rdac_dh_data *h = get_rdac_data(sdev);
392
393 err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry));
394 if (err == SCSI_DH_OK) {
395 inqp = &h->inq.c4;
396 h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
397 if (!h->ctlr)
398 err = SCSI_DH_RES_TEMP_UNAVAIL;
399 }
400 return err;
401}
402
403static int set_mode_select(struct scsi_device *sdev)
404{
405 int err;
406 struct c2_inquiry *inqp;
407 struct rdac_dh_data *h = get_rdac_data(sdev);
408
409 err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry));
410 if (err == SCSI_DH_OK) {
411 inqp = &h->inq.c2;
412 /*
413 * If more than MODE6_MAX_LUN luns are supported, use
414 * mode select 10
415 */
416 if (inqp->max_lun_supported >= MODE6_MAX_LUN)
417 h->ctlr->use_ms10 = 1;
418 else
419 h->ctlr->use_ms10 = 0;
420 }
421 return err;
422}
423
/*
 * mode_select_handle_sense - decode sense data from a failed MODE_SELECT.
 *
 * Packs key/asc/ascq into one integer (0xKKAASS-style nibbles) and maps
 * known transient conditions to SCSI_DH_RETRY.  No valid sense at all
 * is reported as SCSI_DH_IO.
 */
static int mode_select_handle_sense(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sense_hdr;
	struct rdac_dh_data *h = get_rdac_data(sdev);
	int sense, err = SCSI_DH_IO, ret;

	ret = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
	if (!ret)
		goto done;

	err = SCSI_DH_OK;
	/* sense = (key << 16) | (asc << 8) | ascq */
	sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
			sense_hdr.ascq;
	/* If it is retryable failure, submit the c9 inquiry again */
	if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
	    sense == 0x62900) {
		/* 0x59136    - Command lock contention
		 * 0x[6b]8b02 - Quiesense in progress or achieved
		 * 0x62900    - Power On, Reset, or Bus Device Reset
		 */
		err = SCSI_DH_RETRY;
	}

	if (sense)
		sdev_printk(KERN_INFO, sdev,
			    "MODE_SELECT failed with sense 0x%x.\n", sense);
done:
	return err;
}
453
454static int send_mode_select(struct scsi_device *sdev)
455{
456 struct request *rq;
457 struct request_queue *q = sdev->request_queue;
458 struct rdac_dh_data *h = get_rdac_data(sdev);
459 int err = SCSI_DH_RES_TEMP_UNAVAIL;
460
461 rq = rdac_failover_get(sdev);
462 if (!rq)
463 goto done;
464
465 sdev_printk(KERN_INFO, sdev, "queueing MODE_SELECT command.\n");
466
467 err = blk_execute_rq(q, NULL, rq, 1);
468 if (err != SCSI_DH_OK)
469 err = mode_select_handle_sense(sdev);
470 if (err == SCSI_DH_OK)
471 h->state = RDAC_STATE_ACTIVE;
472done:
473 return err;
474}
475
/*
 * rdac_activate - path-activation entry point for the RDAC handler.
 *
 * Lazily discovers the LUN, checks current ownership, binds the
 * controller object and probes the mode-select width, then issues the
 * ownership-transfer MODE_SELECT.  Returns a SCSI_DH_* result code.
 */
static int rdac_activate(struct scsi_device *sdev)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);
	int err = SCSI_DH_OK;

	/* first activation on this device: discover the LUN number */
	if (h->lun == UNINITIALIZED_LUN) {
		err = get_lun(sdev);
		if (err != SCSI_DH_OK)
			goto done;
	}

	err = check_ownership(sdev);
	switch (err) {
	case RDAC_UNOWNED:
		/* not owned — proceed to transfer ownership below */
		break;
	case RDAC_OWNED:
		/* already owned (or AVT mode): nothing to do */
		err = SCSI_DH_OK;
		goto done;
	case RDAC_FAILED:
	default:
		err = SCSI_DH_IO;
		goto done;
	}

	if (!h->ctlr) {
		err = initialize_controller(sdev);
		if (err != SCSI_DH_OK)
			goto done;
	}

	/* -1 means mode-select width has not been probed yet */
	if (h->ctlr->use_ms10 == -1) {
		err = set_mode_select(sdev);
		if (err != SCSI_DH_OK)
			goto done;
	}

	err = send_mode_select(sdev);
done:
	return err;
}
516
517static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
518{
519 struct rdac_dh_data *h = get_rdac_data(sdev);
520 int ret = BLKPREP_OK;
521
522 if (h->state != RDAC_STATE_ACTIVE) {
523 ret = BLKPREP_KILL;
524 req->cmd_flags |= REQ_QUIET;
525 }
526 return ret;
527
528}
529
/*
 * rdac_check_sense - sense-interception hook for RDAC-specific errors.
 *
 * Classifies known RDAC sense codes: bypass the path (SUCCESS), retry
 * (NEEDS_RETRY), mark the path passive, or defer to the midlayer
 * (SCSI_RETURN_NOT_HANDLED).
 */
static int rdac_check_sense(struct scsi_device *sdev,
				struct scsi_sense_hdr *sense_hdr)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);
	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
			/* LUN Not Ready - Storage firmware incompatible
			 * Manual code synchonisation required.
			 *
			 * Nothing we can do here. Try to bypass the path.
			 */
			return SUCCESS;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
			/* LUN Not Ready - Quiescense in progress
			 *
			 * Just retry and wait.
			 */
			return NEEDS_RETRY;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
			/* Invalid Request - Current Logical Unit Ownership.
			 * Controller is not the current owner of the LUN,
			 * Fail the path, so that the other path be used.
			 */
			h->state = RDAC_STATE_PASSIVE;
			return SUCCESS;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
			/*
			 * Power On, Reset, or Bus Device Reset, just retry.
			 */
			return NEEDS_RETRY;
		break;
	}
	/* success just means we do not care what scsi-ml does */
	return SCSI_RETURN_NOT_HANDLED;
}
571
572static const struct {
573 char *vendor;
574 char *model;
575} rdac_dev_list[] = {
576 {"IBM", "1722"},
577 {"IBM", "1724"},
578 {"IBM", "1726"},
579 {"IBM", "1742"},
580 {"IBM", "1814"},
581 {"IBM", "1815"},
582 {"IBM", "1818"},
583 {"IBM", "3526"},
584 {"SGI", "TP9400"},
585 {"SGI", "TP9500"},
586 {"SGI", "IS"},
587 {"STK", "OPENstorage D280"},
588 {"SUN", "CSM200_R"},
589 {"SUN", "LCSM100_F"},
590 {NULL, NULL},
591};
592
593static int rdac_bus_notify(struct notifier_block *, unsigned long, void *);
594
595static struct scsi_device_handler rdac_dh = {
596 .name = RDAC_NAME,
597 .module = THIS_MODULE,
598 .nb.notifier_call = rdac_bus_notify,
599 .prep_fn = rdac_prep_fn,
600 .check_sense = rdac_check_sense,
601 .activate = rdac_activate,
602};
603
604/*
605 * TODO: need some interface so we can set trespass values
606 */
607static int rdac_bus_notify(struct notifier_block *nb,
608 unsigned long action, void *data)
609{
610 struct device *dev = data;
611 struct scsi_device *sdev;
612 struct scsi_dh_data *scsi_dh_data;
613 struct rdac_dh_data *h;
614 int i, found = 0;
615 unsigned long flags;
616
617 if (!scsi_is_sdev_device(dev))
618 return 0;
619
620 sdev = to_scsi_device(dev);
621
622 if (action == BUS_NOTIFY_ADD_DEVICE) {
623 for (i = 0; rdac_dev_list[i].vendor; i++) {
624 if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
625 strlen(rdac_dev_list[i].vendor)) &&
626 !strncmp(sdev->model, rdac_dev_list[i].model,
627 strlen(rdac_dev_list[i].model))) {
628 found = 1;
629 break;
630 }
631 }
632 if (!found)
633 goto out;
634
635 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
636 + sizeof(*h) , GFP_KERNEL);
637 if (!scsi_dh_data) {
638 sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
639 RDAC_NAME);
640 goto out;
641 }
642
643 scsi_dh_data->scsi_dh = &rdac_dh;
644 h = (struct rdac_dh_data *) scsi_dh_data->buf;
645 h->lun = UNINITIALIZED_LUN;
646 h->state = RDAC_STATE_ACTIVE;
647 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
648 sdev->scsi_dh_data = scsi_dh_data;
649 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
650 try_module_get(THIS_MODULE);
651
652 sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", RDAC_NAME);
653
654 } else if (action == BUS_NOTIFY_DEL_DEVICE) {
655 if (sdev->scsi_dh_data == NULL ||
656 sdev->scsi_dh_data->scsi_dh != &rdac_dh)
657 goto out;
658
659 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
660 scsi_dh_data = sdev->scsi_dh_data;
661 sdev->scsi_dh_data = NULL;
662 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
663
664 h = (struct rdac_dh_data *) scsi_dh_data->buf;
665 if (h->ctlr)
666 kref_put(&h->ctlr->kref, release_controller);
667 kfree(scsi_dh_data);
668 module_put(THIS_MODULE);
669 sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", RDAC_NAME);
670 }
671
672out:
673 return 0;
674}
675
676static int __init rdac_init(void)
677{
678 int r;
679
680 r = scsi_register_device_handler(&rdac_dh);
681 if (r != 0)
682 printk(KERN_ERR "Failed to register scsi device handler.");
683 return r;
684}
685
/*
 * rdac_exit - module exit point; unregisters the handler.
 */
static void __exit rdac_exit(void)
{
	scsi_unregister_device_handler(&rdac_dh);
}
690
691module_init(rdac_init);
692module_exit(rdac_exit);
693
694MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
695MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
696MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 59fbef08d690..62a4618530d0 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -219,19 +219,10 @@ static void esp_reset_esp(struct esp *esp)
219 /* Now reset the ESP chip */ 219 /* Now reset the ESP chip */
220 scsi_esp_cmd(esp, ESP_CMD_RC); 220 scsi_esp_cmd(esp, ESP_CMD_RC);
221 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); 221 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
222 if (esp->rev == FAST)
223 esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
222 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); 224 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
223 225
224 /* Reload the configuration registers */
225 esp_write8(esp->cfact, ESP_CFACT);
226
227 esp->prev_stp = 0;
228 esp_write8(esp->prev_stp, ESP_STP);
229
230 esp->prev_soff = 0;
231 esp_write8(esp->prev_soff, ESP_SOFF);
232
233 esp_write8(esp->neg_defp, ESP_TIMEO);
234
235 /* This is the only point at which it is reliable to read 226 /* This is the only point at which it is reliable to read
236 * the ID-code for a fast ESP chip variants. 227 * the ID-code for a fast ESP chip variants.
237 */ 228 */
@@ -316,6 +307,17 @@ static void esp_reset_esp(struct esp *esp)
316 break; 307 break;
317 } 308 }
318 309
310 /* Reload the configuration registers */
311 esp_write8(esp->cfact, ESP_CFACT);
312
313 esp->prev_stp = 0;
314 esp_write8(esp->prev_stp, ESP_STP);
315
316 esp->prev_soff = 0;
317 esp_write8(esp->prev_soff, ESP_SOFF);
318
319 esp_write8(esp->neg_defp, ESP_TIMEO);
320
319 /* Eat any bitrot in the chip */ 321 /* Eat any bitrot in the chip */
320 esp_read8(ESP_INTRPT); 322 esp_read8(ESP_INTRPT);
321 udelay(100); 323 udelay(100);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index c6457bfc8a49..35cd892dce04 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -290,7 +290,7 @@ static void scsi_host_dev_release(struct device *dev)
290 kfree(shost); 290 kfree(shost);
291} 291}
292 292
293struct device_type scsi_host_type = { 293static struct device_type scsi_host_type = {
294 .name = "scsi_host", 294 .name = "scsi_host",
295 .release = scsi_host_dev_release, 295 .release = scsi_host_dev_release,
296}; 296};
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index 6ac0633d5452..a423d9633625 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -5,3 +5,4 @@ ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o
5ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o 5ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
6 6
7obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o 7obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
8obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
new file mode 100644
index 000000000000..eb702b96d57c
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -0,0 +1,3910 @@
1/*
2 * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
3 *
4 * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) IBM Corporation, 2008
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/dma-mapping.h>
27#include <linux/dmapool.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
30#include <linux/kthread.h>
31#include <linux/of.h>
32#include <linux/stringify.h>
33#include <asm/firmware.h>
34#include <asm/irq.h>
35#include <asm/vio.h>
36#include <scsi/scsi.h>
37#include <scsi/scsi_cmnd.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_tcq.h>
41#include <scsi/scsi_transport_fc.h>
42#include "ibmvfc.h"
43
44static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
45static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
46static unsigned int max_lun = IBMVFC_MAX_LUN;
47static unsigned int max_targets = IBMVFC_MAX_TARGETS;
48static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
49static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
50static unsigned int dev_loss_tmo = IBMVFC_DEV_LOSS_TMO;
51static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
52static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
53static LIST_HEAD(ibmvfc_head);
54static DEFINE_SPINLOCK(ibmvfc_driver_lock);
55static struct scsi_transport_template *ibmvfc_transport_template;
56
57MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
58MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
59MODULE_LICENSE("GPL");
60MODULE_VERSION(IBMVFC_DRIVER_VERSION);
61
62module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
63MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
64 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
65module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
66MODULE_PARM_DESC(default_timeout,
67 "Default timeout in seconds for initialization and EH commands. "
68 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
69module_param_named(max_requests, max_requests, uint, S_IRUGO);
70MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
71 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
72module_param_named(max_lun, max_lun, uint, S_IRUGO);
73MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
74 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
75module_param_named(max_targets, max_targets, uint, S_IRUGO);
76MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
77 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
78module_param_named(disc_threads, disc_threads, uint, S_IRUGO | S_IWUSR);
79MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
80 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
81module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
82MODULE_PARM_DESC(debug, "Enable driver debug information. "
83 "[Default=" __stringify(IBMVFC_DEBUG) "]");
84module_param_named(dev_loss_tmo, dev_loss_tmo, uint, S_IRUGO | S_IWUSR);
85MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC "
86 "transport should insulate the loss of a remote port. Once this "
87 "value is exceeded, the scsi target is removed. "
88 "[Default=" __stringify(IBMVFC_DEV_LOSS_TMO) "]");
89module_param_named(log_level, log_level, uint, 0);
90MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
91 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
92
93static const struct {
94 u16 status;
95 u16 error;
96 u8 result;
97 u8 retry;
98 int log;
99 char *name;
100} cmd_status [] = {
101 { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
102 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
103 { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
104 { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_NO_CONNECT, 1, 1, "network down" },
105 { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
106 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
107 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
108 { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
109 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
110 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
111 { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
112 { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
113 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" },
114 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
115
116 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
117 { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
118 { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ABORT, 0, 1, "invalid parameter" },
119 { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ABORT, 0, 1, "missing parameter" },
120 { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
121 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" },
122 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" },
123 { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
124 { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
125
126 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
127 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
128 { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
129 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
130 { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
131 { IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
132 { IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
133 { IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
134 { IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
135 { IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
136 { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
137
138 { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
139};
140
141static void ibmvfc_npiv_login(struct ibmvfc_host *);
142static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
143static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
144static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
145
146static const char *unknown_error = "unknown error";
147
148#ifdef CONFIG_SCSI_IBMVFC_TRACE
149/**
150 * ibmvfc_trc_start - Log a start trace entry
151 * @evt: ibmvfc event struct
152 *
153 **/
154static void ibmvfc_trc_start(struct ibmvfc_event *evt)
155{
156 struct ibmvfc_host *vhost = evt->vhost;
157 struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
158 struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
159 struct ibmvfc_trace_entry *entry;
160
161 entry = &vhost->trace[vhost->trace_index++];
162 entry->evt = evt;
163 entry->time = jiffies;
164 entry->fmt = evt->crq.format;
165 entry->type = IBMVFC_TRC_START;
166
167 switch (entry->fmt) {
168 case IBMVFC_CMD_FORMAT:
169 entry->op_code = vfc_cmd->iu.cdb[0];
170 entry->scsi_id = vfc_cmd->tgt_scsi_id;
171 entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
172 entry->tmf_flags = vfc_cmd->iu.tmf_flags;
173 entry->u.start.xfer_len = vfc_cmd->iu.xfer_len;
174 break;
175 case IBMVFC_MAD_FORMAT:
176 entry->op_code = mad->opcode;
177 break;
178 default:
179 break;
180 };
181}
182
183/**
184 * ibmvfc_trc_end - Log an end trace entry
185 * @evt: ibmvfc event struct
186 *
187 **/
188static void ibmvfc_trc_end(struct ibmvfc_event *evt)
189{
190 struct ibmvfc_host *vhost = evt->vhost;
191 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
192 struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
193 struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
194
195 entry->evt = evt;
196 entry->time = jiffies;
197 entry->fmt = evt->crq.format;
198 entry->type = IBMVFC_TRC_END;
199
200 switch (entry->fmt) {
201 case IBMVFC_CMD_FORMAT:
202 entry->op_code = vfc_cmd->iu.cdb[0];
203 entry->scsi_id = vfc_cmd->tgt_scsi_id;
204 entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
205 entry->tmf_flags = vfc_cmd->iu.tmf_flags;
206 entry->u.end.status = vfc_cmd->status;
207 entry->u.end.error = vfc_cmd->error;
208 entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
209 entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
210 entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
211 break;
212 case IBMVFC_MAD_FORMAT:
213 entry->op_code = mad->opcode;
214 entry->u.end.status = mad->status;
215 break;
216 default:
217 break;
218
219 };
220}
221
222#else
223#define ibmvfc_trc_start(evt) do { } while (0)
224#define ibmvfc_trc_end(evt) do { } while (0)
225#endif
226
227/**
228 * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
229 * @status: status / error class
230 * @error: error
231 *
232 * Return value:
233 * index into cmd_status / -EINVAL on failure
234 **/
235static int ibmvfc_get_err_index(u16 status, u16 error)
236{
237 int i;
238
239 for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
240 if ((cmd_status[i].status & status) == cmd_status[i].status &&
241 cmd_status[i].error == error)
242 return i;
243
244 return -EINVAL;
245}
246
247/**
248 * ibmvfc_get_cmd_error - Find the error description for the fcp response
249 * @status: status / error class
250 * @error: error
251 *
252 * Return value:
253 * error description string
254 **/
255static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
256{
257 int rc = ibmvfc_get_err_index(status, error);
258 if (rc >= 0)
259 return cmd_status[rc].name;
260 return unknown_error;
261}
262
263/**
264 * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
265 * @vfc_cmd: ibmvfc command struct
266 *
267 * Return value:
268 * SCSI result value to return for completed command
269 **/
270static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
271{
272 int err;
273 struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
274 int fc_rsp_len = rsp->fcp_rsp_len;
275
276 if ((rsp->flags & FCP_RSP_LEN_VALID) &&
277 ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
278 rsp->data.info.rsp_code))
279 return DID_ERROR << 16;
280
281 if (!vfc_cmd->status) {
282 if (rsp->flags & FCP_RESID_OVER)
283 return rsp->scsi_status | (DID_ERROR << 16);
284 else
285 return rsp->scsi_status | (DID_OK << 16);
286 }
287
288 err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
289 if (err >= 0)
290 return rsp->scsi_status | (cmd_status[err].result << 16);
291 return rsp->scsi_status | (DID_ERROR << 16);
292}
293
294/**
295 * ibmvfc_retry_cmd - Determine if error status is retryable
296 * @status: status / error class
297 * @error: error
298 *
299 * Return value:
300 * 1 if error should be retried / 0 if it should not
301 **/
302static int ibmvfc_retry_cmd(u16 status, u16 error)
303{
304 int rc = ibmvfc_get_err_index(status, error);
305
306 if (rc >= 0)
307 return cmd_status[rc].retry;
308 return 1;
309}
310
311static const char *unknown_fc_explain = "unknown fc explain";
312
313static const struct {
314 u16 fc_explain;
315 char *name;
316} ls_explain [] = {
317 { 0x00, "no additional explanation" },
318 { 0x01, "service parameter error - options" },
319 { 0x03, "service parameter error - initiator control" },
320 { 0x05, "service parameter error - recipient control" },
321 { 0x07, "service parameter error - received data field size" },
322 { 0x09, "service parameter error - concurrent seq" },
323 { 0x0B, "service parameter error - credit" },
324 { 0x0D, "invalid N_Port/F_Port_Name" },
325 { 0x0E, "invalid node/Fabric Name" },
326 { 0x0F, "invalid common service parameters" },
327 { 0x11, "invalid association header" },
328 { 0x13, "association header required" },
329 { 0x15, "invalid originator S_ID" },
330 { 0x17, "invalid OX_ID-RX-ID combination" },
331 { 0x19, "command (request) already in progress" },
332 { 0x1E, "N_Port Login requested" },
333 { 0x1F, "Invalid N_Port_ID" },
334};
335
336static const struct {
337 u16 fc_explain;
338 char *name;
339} gs_explain [] = {
340 { 0x00, "no additional explanation" },
341 { 0x01, "port identifier not registered" },
342 { 0x02, "port name not registered" },
343 { 0x03, "node name not registered" },
344 { 0x04, "class of service not registered" },
345 { 0x06, "initial process associator not registered" },
346 { 0x07, "FC-4 TYPEs not registered" },
347 { 0x08, "symbolic port name not registered" },
348 { 0x09, "symbolic node name not registered" },
349 { 0x0A, "port type not registered" },
350 { 0xF0, "authorization exception" },
351 { 0xF1, "authentication exception" },
352 { 0xF2, "data base full" },
353 { 0xF3, "data base empty" },
354 { 0xF4, "processing request" },
355 { 0xF5, "unable to verify connection" },
356 { 0xF6, "devices not in a common zone" },
357};
358
359/**
360 * ibmvfc_get_ls_explain - Return the FC Explain description text
361 * @status: FC Explain status
362 *
363 * Returns:
364 * error string
365 **/
366static const char *ibmvfc_get_ls_explain(u16 status)
367{
368 int i;
369
370 for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
371 if (ls_explain[i].fc_explain == status)
372 return ls_explain[i].name;
373
374 return unknown_fc_explain;
375}
376
377/**
378 * ibmvfc_get_gs_explain - Return the FC Explain description text
379 * @status: FC Explain status
380 *
381 * Returns:
382 * error string
383 **/
384static const char *ibmvfc_get_gs_explain(u16 status)
385{
386 int i;
387
388 for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
389 if (gs_explain[i].fc_explain == status)
390 return gs_explain[i].name;
391
392 return unknown_fc_explain;
393}
394
395static const struct {
396 enum ibmvfc_fc_type fc_type;
397 char *name;
398} fc_type [] = {
399 { IBMVFC_FABRIC_REJECT, "fabric reject" },
400 { IBMVFC_PORT_REJECT, "port reject" },
401 { IBMVFC_LS_REJECT, "ELS reject" },
402 { IBMVFC_FABRIC_BUSY, "fabric busy" },
403 { IBMVFC_PORT_BUSY, "port busy" },
404 { IBMVFC_BASIC_REJECT, "basic reject" },
405};
406
407static const char *unknown_fc_type = "unknown fc type";
408
409/**
410 * ibmvfc_get_fc_type - Return the FC Type description text
411 * @status: FC Type error status
412 *
413 * Returns:
414 * error string
415 **/
416static const char *ibmvfc_get_fc_type(u16 status)
417{
418 int i;
419
420 for (i = 0; i < ARRAY_SIZE(fc_type); i++)
421 if (fc_type[i].fc_type == status)
422 return fc_type[i].name;
423
424 return unknown_fc_type;
425}
426
427/**
428 * ibmvfc_set_tgt_action - Set the next init action for the target
429 * @tgt: ibmvfc target struct
430 * @action: action to perform
431 *
432 **/
433static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
434 enum ibmvfc_target_action action)
435{
436 switch (tgt->action) {
437 case IBMVFC_TGT_ACTION_DEL_RPORT:
438 break;
439 default:
440 tgt->action = action;
441 break;
442 }
443}
444
445/**
446 * ibmvfc_set_host_state - Set the state for the host
447 * @vhost: ibmvfc host struct
448 * @state: state to set host to
449 *
450 * Returns:
451 * 0 if state changed / non-zero if not changed
452 **/
453static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
454 enum ibmvfc_host_state state)
455{
456 int rc = 0;
457
458 switch (vhost->state) {
459 case IBMVFC_HOST_OFFLINE:
460 rc = -EINVAL;
461 break;
462 default:
463 vhost->state = state;
464 break;
465 };
466
467 return rc;
468}
469
470/**
471 * ibmvfc_set_host_action - Set the next init action for the host
472 * @vhost: ibmvfc host struct
473 * @action: action to perform
474 *
475 **/
476static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
477 enum ibmvfc_host_action action)
478{
479 switch (action) {
480 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
481 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
482 vhost->action = action;
483 break;
484 case IBMVFC_HOST_ACTION_INIT_WAIT:
485 if (vhost->action == IBMVFC_HOST_ACTION_INIT)
486 vhost->action = action;
487 break;
488 case IBMVFC_HOST_ACTION_QUERY:
489 switch (vhost->action) {
490 case IBMVFC_HOST_ACTION_INIT_WAIT:
491 case IBMVFC_HOST_ACTION_NONE:
492 case IBMVFC_HOST_ACTION_TGT_ADD:
493 vhost->action = action;
494 break;
495 default:
496 break;
497 };
498 break;
499 case IBMVFC_HOST_ACTION_TGT_INIT:
500 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
501 vhost->action = action;
502 break;
503 case IBMVFC_HOST_ACTION_INIT:
504 case IBMVFC_HOST_ACTION_TGT_DEL:
505 case IBMVFC_HOST_ACTION_QUERY_TGTS:
506 case IBMVFC_HOST_ACTION_TGT_ADD:
507 case IBMVFC_HOST_ACTION_NONE:
508 default:
509 vhost->action = action;
510 break;
511 };
512}
513
514/**
515 * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
516 * @vhost: ibmvfc host struct
517 *
518 * Return value:
519 * nothing
520 **/
521static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
522{
523 if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
524 scsi_block_requests(vhost->host);
525 ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING);
526 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
527 } else
528 vhost->reinit = 1;
529
530 wake_up(&vhost->work_wait_q);
531}
532
533/**
534 * ibmvfc_link_down - Handle a link down event from the adapter
535 * @vhost: ibmvfc host struct
536 * @state: ibmvfc host state to enter
537 *
538 **/
539static void ibmvfc_link_down(struct ibmvfc_host *vhost,
540 enum ibmvfc_host_state state)
541{
542 struct ibmvfc_target *tgt;
543
544 ENTER;
545 scsi_block_requests(vhost->host);
546 list_for_each_entry(tgt, &vhost->targets, queue)
547 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
548 ibmvfc_set_host_state(vhost, state);
549 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
550 vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
551 wake_up(&vhost->work_wait_q);
552 LEAVE;
553}
554
555/**
556 * ibmvfc_init_host - Start host initialization
557 * @vhost: ibmvfc host struct
558 *
559 * Return value:
560 * nothing
561 **/
562static void ibmvfc_init_host(struct ibmvfc_host *vhost)
563{
564 struct ibmvfc_target *tgt;
565
566 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
567 if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
568 dev_err(vhost->dev,
569 "Host initialization retries exceeded. Taking adapter offline\n");
570 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
571 return;
572 }
573 }
574
575 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
576 list_for_each_entry(tgt, &vhost->targets, queue)
577 tgt->need_login = 1;
578 scsi_block_requests(vhost->host);
579 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
580 vhost->job_step = ibmvfc_npiv_login;
581 wake_up(&vhost->work_wait_q);
582 }
583}
584
585/**
586 * ibmvfc_send_crq - Send a CRQ
587 * @vhost: ibmvfc host struct
588 * @word1: the first 64 bits of the data
589 * @word2: the second 64 bits of the data
590 *
591 * Return value:
592 * 0 on success / other on failure
593 **/
594static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
595{
596 struct vio_dev *vdev = to_vio_dev(vhost->dev);
597 return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
598}
599
600/**
601 * ibmvfc_send_crq_init - Send a CRQ init message
602 * @vhost: ibmvfc host struct
603 *
604 * Return value:
605 * 0 on success / other on failure
606 **/
607static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
608{
609 ibmvfc_dbg(vhost, "Sending CRQ init\n");
610 return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
611}
612
613/**
614 * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
615 * @vhost: ibmvfc host struct
616 *
617 * Return value:
618 * 0 on success / other on failure
619 **/
620static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
621{
622 ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
623 return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
624}
625
626/**
627 * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
628 * @vhost: ibmvfc host struct
629 *
630 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
631 * the crq with the hypervisor.
632 **/
633static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
634{
635 long rc;
636 struct vio_dev *vdev = to_vio_dev(vhost->dev);
637 struct ibmvfc_crq_queue *crq = &vhost->crq;
638
639 ibmvfc_dbg(vhost, "Releasing CRQ\n");
640 free_irq(vdev->irq, vhost);
641 do {
642 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
643 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
644
645 vhost->state = IBMVFC_NO_CRQ;
646 dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
647 free_page((unsigned long)crq->msgs);
648}
649
650/**
651 * ibmvfc_reenable_crq_queue - reenables the CRQ
652 * @vhost: ibmvfc host struct
653 *
654 * Return value:
655 * 0 on success / other on failure
656 **/
657static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
658{
659 int rc;
660 struct vio_dev *vdev = to_vio_dev(vhost->dev);
661
662 /* Re-enable the CRQ */
663 do {
664 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
665 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
666
667 if (rc)
668 dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
669
670 return rc;
671}
672
673/**
674 * ibmvfc_reset_crq - resets a crq after a failure
675 * @vhost: ibmvfc host struct
676 *
677 * Return value:
678 * 0 on success / other on failure
679 **/
680static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
681{
682 int rc;
683 struct vio_dev *vdev = to_vio_dev(vhost->dev);
684 struct ibmvfc_crq_queue *crq = &vhost->crq;
685
686 /* Close the CRQ */
687 do {
688 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
689 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
690
691 vhost->state = IBMVFC_NO_CRQ;
692 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
693
694 /* Clean out the queue */
695 memset(crq->msgs, 0, PAGE_SIZE);
696 crq->cur = 0;
697
698 /* And re-open it again */
699 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
700 crq->msg_token, PAGE_SIZE);
701
702 if (rc == H_CLOSED)
703 /* Adapter is good, but other end is not ready */
704 dev_warn(vhost->dev, "Partner adapter not ready\n");
705 else if (rc != 0)
706 dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
707
708 return rc;
709}
710
711/**
712 * ibmvfc_valid_event - Determines if event is valid.
713 * @pool: event_pool that contains the event
714 * @evt: ibmvfc event to be checked for validity
715 *
716 * Return value:
717 * 1 if event is valid / 0 if event is not valid
718 **/
719static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
720 struct ibmvfc_event *evt)
721{
722 int index = evt - pool->events;
723 if (index < 0 || index >= pool->size) /* outside of bounds */
724 return 0;
725 if (evt != pool->events + index) /* unaligned */
726 return 0;
727 return 1;
728}
729
730/**
731 * ibmvfc_free_event - Free the specified event
732 * @evt: ibmvfc_event to be freed
733 *
734 **/
735static void ibmvfc_free_event(struct ibmvfc_event *evt)
736{
737 struct ibmvfc_host *vhost = evt->vhost;
738 struct ibmvfc_event_pool *pool = &vhost->pool;
739
740 BUG_ON(!ibmvfc_valid_event(pool, evt));
741 BUG_ON(atomic_inc_return(&evt->free) != 1);
742 list_add_tail(&evt->queue, &vhost->free);
743}
744
745/**
746 * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
747 * @evt: ibmvfc event struct
748 *
749 * This function does not setup any error status, that must be done
750 * before this function gets called.
751 **/
752static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
753{
754 struct scsi_cmnd *cmnd = evt->cmnd;
755
756 if (cmnd) {
757 scsi_dma_unmap(cmnd);
758 cmnd->scsi_done(cmnd);
759 }
760
761 ibmvfc_free_event(evt);
762}
763
764/**
765 * ibmvfc_fail_request - Fail request with specified error code
766 * @evt: ibmvfc event struct
767 * @error_code: error code to fail request with
768 *
769 * Return value:
770 * none
771 **/
772static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
773{
774 if (evt->cmnd) {
775 evt->cmnd->result = (error_code << 16);
776 evt->done = ibmvfc_scsi_eh_done;
777 } else
778 evt->xfer_iu->mad_common.status = IBMVFC_MAD_DRIVER_FAILED;
779
780 list_del(&evt->queue);
781 del_timer(&evt->timer);
782 ibmvfc_trc_end(evt);
783 evt->done(evt);
784}
785
786/**
787 * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
788 * @vhost: ibmvfc host struct
789 * @error_code: error code to fail requests with
790 *
791 * Return value:
792 * none
793 **/
794static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
795{
796 struct ibmvfc_event *evt, *pos;
797
798 ibmvfc_dbg(vhost, "Purging all requests\n");
799 list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
800 ibmvfc_fail_request(evt, error_code);
801}
802
803/**
804 * __ibmvfc_reset_host - Reset the connection to the server (no locking)
805 * @vhost: struct ibmvfc host to reset
806 **/
807static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
808{
809 int rc;
810
811 scsi_block_requests(vhost->host);
812 ibmvfc_purge_requests(vhost, DID_ERROR);
813 if ((rc = ibmvfc_reset_crq(vhost)) ||
814 (rc = ibmvfc_send_crq_init(vhost)) ||
815 (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
816 dev_err(vhost->dev, "Error after reset rc=%d\n", rc);
817 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
818 } else
819 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
820}
821
822/**
823 * ibmvfc_reset_host - Reset the connection to the server
824 * @vhost: struct ibmvfc host to reset
825 **/
826static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
827{
828 unsigned long flags;
829
830 spin_lock_irqsave(vhost->host->host_lock, flags);
831 __ibmvfc_reset_host(vhost);
832 spin_unlock_irqrestore(vhost->host->host_lock, flags);
833}
834
835/**
836 * ibmvfc_retry_host_init - Retry host initialization if allowed
837 * @vhost: ibmvfc host struct
838 *
839 **/
840static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
841{
842 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
843 if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
844 dev_err(vhost->dev,
845 "Host initialization retries exceeded. Taking adapter offline\n");
846 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
847 } else if (vhost->init_retries == IBMVFC_MAX_INIT_RETRIES)
848 __ibmvfc_reset_host(vhost);
849 else
850 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
851 }
852
853 wake_up(&vhost->work_wait_q);
854}
855
856/**
857 * __ibmvfc_find_target - Find the specified scsi_target (no locking)
858 * @starget: scsi target struct
859 *
860 * Return value:
861 * ibmvfc_target struct / NULL if not found
862 **/
863static struct ibmvfc_target *__ibmvfc_find_target(struct scsi_target *starget)
864{
865 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
866 struct ibmvfc_host *vhost = shost_priv(shost);
867 struct ibmvfc_target *tgt;
868
869 list_for_each_entry(tgt, &vhost->targets, queue)
870 if (tgt->target_id == starget->id)
871 return tgt;
872 return NULL;
873}
874
875/**
876 * ibmvfc_find_target - Find the specified scsi_target
877 * @starget: scsi target struct
878 *
879 * Return value:
880 * ibmvfc_target struct / NULL if not found
881 **/
882static struct ibmvfc_target *ibmvfc_find_target(struct scsi_target *starget)
883{
884 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
885 struct ibmvfc_target *tgt;
886 unsigned long flags;
887
888 spin_lock_irqsave(shost->host_lock, flags);
889 tgt = __ibmvfc_find_target(starget);
890 spin_unlock_irqrestore(shost->host_lock, flags);
891 return tgt;
892}
893
894/**
895 * ibmvfc_get_host_speed - Get host port speed
896 * @shost: scsi host struct
897 *
898 * Return value:
899 * none
900 **/
901static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
902{
903 struct ibmvfc_host *vhost = shost_priv(shost);
904 unsigned long flags;
905
906 spin_lock_irqsave(shost->host_lock, flags);
907 if (vhost->state == IBMVFC_ACTIVE) {
908 switch (vhost->login_buf->resp.link_speed / 100) {
909 case 1:
910 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
911 break;
912 case 2:
913 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
914 break;
915 case 4:
916 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
917 break;
918 case 8:
919 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
920 break;
921 case 10:
922 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
923 break;
924 case 16:
925 fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
926 break;
927 default:
928 ibmvfc_log(vhost, 3, "Unknown port speed: %ld Gbit\n",
929 vhost->login_buf->resp.link_speed / 100);
930 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
931 break;
932 }
933 } else
934 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
935 spin_unlock_irqrestore(shost->host_lock, flags);
936}
937
938/**
939 * ibmvfc_get_host_port_state - Get host port state
940 * @shost: scsi host struct
941 *
942 * Return value:
943 * none
944 **/
945static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
946{
947 struct ibmvfc_host *vhost = shost_priv(shost);
948 unsigned long flags;
949
950 spin_lock_irqsave(shost->host_lock, flags);
951 switch (vhost->state) {
952 case IBMVFC_INITIALIZING:
953 case IBMVFC_ACTIVE:
954 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
955 break;
956 case IBMVFC_LINK_DOWN:
957 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
958 break;
959 case IBMVFC_LINK_DEAD:
960 case IBMVFC_HOST_OFFLINE:
961 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
962 break;
963 case IBMVFC_HALTED:
964 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
965 break;
966 default:
967 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
968 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
969 break;
970 }
971 spin_unlock_irqrestore(shost->host_lock, flags);
972}
973
974/**
975 * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
976 * @rport: rport struct
977 * @timeout: timeout value
978 *
979 * Return value:
980 * none
981 **/
982static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
983{
984 if (timeout)
985 rport->dev_loss_tmo = timeout;
986 else
987 rport->dev_loss_tmo = 1;
988}
989
990/**
991 * ibmvfc_get_starget_node_name - Get SCSI target's node name
992 * @starget: scsi target struct
993 *
994 * Return value:
995 * none
996 **/
997static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
998{
999 struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
1000 fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
1001}
1002
1003/**
1004 * ibmvfc_get_starget_port_name - Get SCSI target's port name
1005 * @starget: scsi target struct
1006 *
1007 * Return value:
1008 * none
1009 **/
1010static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1011{
1012 struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
1013 fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1014}
1015
1016/**
1017 * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1018 * @starget: scsi target struct
1019 *
1020 * Return value:
1021 * none
1022 **/
1023static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1024{
1025 struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
1026 fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1027}
1028
1029/**
1030 * ibmvfc_wait_while_resetting - Wait while the host resets
1031 * @vhost: ibmvfc host struct
1032 *
1033 * Return value:
1034 * 0 on success / other on failure
1035 **/
1036static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1037{
1038 long timeout = wait_event_timeout(vhost->init_wait_q,
1039 (vhost->state == IBMVFC_ACTIVE ||
1040 vhost->state == IBMVFC_HOST_OFFLINE ||
1041 vhost->state == IBMVFC_LINK_DEAD),
1042 (init_timeout * HZ));
1043
1044 return timeout ? 0 : -EIO;
1045}
1046
/**
 * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
 * @shost:	scsi host struct
 *
 * Resets the connection to the virtual I/O server, then waits for the
 * host to leave the resetting state.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);

	dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
	ibmvfc_reset_host(vhost);
	return ibmvfc_wait_while_resetting(vhost);
}
1062
1063/**
1064 * ibmvfc_gather_partition_info - Gather info about the LPAR
1065 *
1066 * Return value:
1067 * none
1068 **/
1069static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1070{
1071 struct device_node *rootdn;
1072 const char *name;
1073 const unsigned int *num;
1074
1075 rootdn = of_find_node_by_path("/");
1076 if (!rootdn)
1077 return;
1078
1079 name = of_get_property(rootdn, "ibm,partition-name", NULL);
1080 if (name)
1081 strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1082 num = of_get_property(rootdn, "ibm,partition-no", NULL);
1083 if (num)
1084 vhost->partition_number = *num;
1085 of_node_put(rootdn);
1086}
1087
/**
 * ibmvfc_set_login_info - Setup info for NPIV login
 * @vhost:	ibmvfc host struct
 *
 * Fills in vhost->login_info, the buffer sent to the server during NPIV
 * login. Partition info is expected to have been gathered already
 * (ibmvfc_gather_partition_info).
 *
 * Return value:
 *	none
 **/
static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
{
	struct ibmvfc_npiv_login *login_info = &vhost->login_info;
	struct device_node *of_node = vhost->dev->archdata.of_node;
	const char *location;

	memset(login_info, 0, sizeof(*login_info));

	login_info->ostype = IBMVFC_OS_LINUX;
	/* sectors -> bytes */
	login_info->max_dma_len = IBMVFC_MAX_SECTORS << 9;
	login_info->max_payload = sizeof(struct ibmvfc_fcp_cmd_iu);
	login_info->max_response = sizeof(struct ibmvfc_fcp_rsp);
	login_info->partition_num = vhost->partition_number;
	login_info->vfc_frame_version = 1;
	login_info->fcp_version = 3;
	if (vhost->client_migrated)
		login_info->flags = IBMVFC_CLIENT_MIGRATED;

	/* Room for all SCSI commands plus driver-internal requests */
	login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
	login_info->capabilities = IBMVFC_CAN_MIGRATE;
	login_info->async.va = vhost->async_crq.msg_token;
	login_info->async.len = vhost->async_crq.size;
	/* Name fields were zeroed by the memset above, so strncpy's
	 * zero-padding yields clean fixed-width fields.
	 * NOTE(review): a source exactly IBMVFC_MAX_NAME long leaves the
	 * field unterminated — presumably fine for the wire format; confirm. */
	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
	strncpy(login_info->device_name,
		vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME);

	/* Prefer the device-tree location code, fall back to the bus id */
	location = of_get_property(of_node, "ibm,loc-code", NULL);
	location = location ? location : vhost->dev->bus_id;
	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
}
1125
1126/**
1127 * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
1128 * @vhost: ibmvfc host who owns the event pool
1129 *
1130 * Returns zero on success.
1131 **/
1132static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
1133{
1134 int i;
1135 struct ibmvfc_event_pool *pool = &vhost->pool;
1136
1137 ENTER;
1138 pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1139 pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
1140 if (!pool->events)
1141 return -ENOMEM;
1142
1143 pool->iu_storage = dma_alloc_coherent(vhost->dev,
1144 pool->size * sizeof(*pool->iu_storage),
1145 &pool->iu_token, 0);
1146
1147 if (!pool->iu_storage) {
1148 kfree(pool->events);
1149 return -ENOMEM;
1150 }
1151
1152 for (i = 0; i < pool->size; ++i) {
1153 struct ibmvfc_event *evt = &pool->events[i];
1154 atomic_set(&evt->free, 1);
1155 evt->crq.valid = 0x80;
1156 evt->crq.ioba = pool->iu_token + (sizeof(*evt->xfer_iu) * i);
1157 evt->xfer_iu = pool->iu_storage + i;
1158 evt->vhost = vhost;
1159 evt->ext_list = NULL;
1160 list_add_tail(&evt->queue, &vhost->free);
1161 }
1162
1163 LEAVE;
1164 return 0;
1165}
1166
/**
 * ibmvfc_free_event_pool - Frees memory of the event pool of a host
 * @vhost:	ibmvfc host who owns the event pool
 *
 * Tears down everything ibmvfc_init_event_pool() set up. Every event
 * must be free (back on the free list) when this is called; the BUG_ON
 * enforces that invariant per event.
 **/
static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
{
	int i;
	struct ibmvfc_event_pool *pool = &vhost->pool;

	ENTER;
	for (i = 0; i < pool->size; ++i) {
		list_del(&pool->events[i].queue);
		BUG_ON(atomic_read(&pool->events[i].free) != 1);
		/* ext_list is allocated lazily in ibmvfc_map_sg_data() and
		 * kept for the life of the event */
		if (pool->events[i].ext_list)
			dma_pool_free(vhost->sg_pool,
				      pool->events[i].ext_list,
				      pool->events[i].ext_list_token);
	}

	kfree(pool->events);
	dma_free_coherent(vhost->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
	LEAVE;
}
1193
1194/**
1195 * ibmvfc_get_event - Gets the next free event in pool
1196 * @vhost: ibmvfc host struct
1197 *
1198 * Returns a free event from the pool.
1199 **/
1200static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
1201{
1202 struct ibmvfc_event *evt;
1203
1204 BUG_ON(list_empty(&vhost->free));
1205 evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
1206 atomic_set(&evt->free, 0);
1207 list_del(&evt->queue);
1208 return evt;
1209}
1210
1211/**
1212 * ibmvfc_init_event - Initialize fields in an event struct that are always
1213 * required.
1214 * @evt: The event
1215 * @done: Routine to call when the event is responded to
1216 * @format: SRP or MAD format
1217 **/
1218static void ibmvfc_init_event(struct ibmvfc_event *evt,
1219 void (*done) (struct ibmvfc_event *), u8 format)
1220{
1221 evt->cmnd = NULL;
1222 evt->sync_iu = NULL;
1223 evt->crq.format = format;
1224 evt->done = done;
1225}
1226
1227/**
1228 * ibmvfc_map_sg_list - Initialize scatterlist
1229 * @scmd: scsi command struct
1230 * @nseg: number of scatterlist segments
1231 * @md: memory descriptor list to initialize
1232 **/
1233static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1234 struct srp_direct_buf *md)
1235{
1236 int i;
1237 struct scatterlist *sg;
1238
1239 scsi_for_each_sg(scmd, sg, nseg, i) {
1240 md[i].va = sg_dma_address(sg);
1241 md[i].len = sg_dma_len(sg);
1242 md[i].key = 0;
1243 }
1244}
1245
/**
 * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
 * @scmd:	Scsi_Cmnd with the scatterlist
 * @evt:	ibmvfc event struct
 * @vfc_cmd:	vfc_cmd that contains the memory descriptor
 * @dev:	device for which to map dma memory
 *
 * Returns:
 *	0 on success / non-zero on failure
 **/
static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
			      struct ibmvfc_event *evt,
			      struct ibmvfc_cmd *vfc_cmd, struct device *dev)
{

	int sg_mapped;
	struct srp_direct_buf *data = &vfc_cmd->ioba;
	struct ibmvfc_host *vhost = dev_get_drvdata(dev);

	sg_mapped = scsi_dma_map(scmd);
	if (!sg_mapped) {
		/* No data transfer for this command */
		vfc_cmd->flags |= IBMVFC_NO_MEM_DESC;
		return 0;
	} else if (unlikely(sg_mapped < 0)) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
		return sg_mapped;
	}

	/* Record the transfer direction in both the command flags and the IU */
	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
		vfc_cmd->flags |= IBMVFC_WRITE;
		vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
	} else {
		vfc_cmd->flags |= IBMVFC_READ;
		vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
	}

	/* A single segment fits in the command's inline descriptor */
	if (sg_mapped == 1) {
		ibmvfc_map_sg_list(scmd, sg_mapped, data);
		return 0;
	}

	vfc_cmd->flags |= IBMVFC_SCATTERLIST;

	/* Multiple segments need an external descriptor list; it is
	 * allocated lazily per event and kept until the pool is freed
	 * (see ibmvfc_free_event_pool) */
	if (!evt->ext_list) {
		evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
					       &evt->ext_list_token);

		if (!evt->ext_list) {
			scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
			return -ENOMEM;
		}
	}

	ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);

	/* Point the inline descriptor at the external list */
	data->va = evt->ext_list_token;
	data->len = sg_mapped * sizeof(struct srp_direct_buf);
	data->key = 0;
	return 0;
}
1307
1308/**
1309 * ibmvfc_timeout - Internal command timeout handler
1310 * @evt: struct ibmvfc_event that timed out
1311 *
1312 * Called when an internally generated command times out
1313 **/
1314static void ibmvfc_timeout(struct ibmvfc_event *evt)
1315{
1316 struct ibmvfc_host *vhost = evt->vhost;
1317 dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1318 ibmvfc_reset_host(vhost);
1319}
1320
/**
 * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
 * @evt:	event to be sent
 * @vhost:	ibmvfc host struct
 * @timeout:	timeout in seconds - 0 means do not time command
 *
 * Returns zero on success (or when the send failed but the event was
 * completed with an error status), SCSI_MLQUEUE_HOST_BUSY if the
 * receive queue was closed and the command should be retried.
 **/
static int ibmvfc_send_event(struct ibmvfc_event *evt,
			     struct ibmvfc_host *vhost, unsigned long timeout)
{
	/* The CRQ entry is handed to firmware as two 64-bit words */
	u64 *crq_as_u64 = (u64 *) &evt->crq;
	int rc;

	/* Copy the IU into the transfer area */
	*evt->xfer_iu = evt->iu;
	/* Tag the IU with the event pointer so the response can be matched
	 * back to this event on completion */
	if (evt->crq.format == IBMVFC_CMD_FORMAT)
		evt->xfer_iu->cmd.tag = (u64)evt;
	else if (evt->crq.format == IBMVFC_MAD_FORMAT)
		evt->xfer_iu->mad_common.tag = (u64)evt;
	else
		BUG();

	list_add_tail(&evt->queue, &vhost->sent);
	init_timer(&evt->timer);

	if (timeout) {
		evt->timer.data = (unsigned long) evt;
		evt->timer.expires = jiffies + (timeout * HZ);
		/* NOTE(review): the handler takes ibmvfc_event* instead of
		 * unsigned long and is cast to fit — relies on the cast
		 * being benign for this ABI */
		evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
		add_timer(&evt->timer);
	}

	if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
		/* Send failed: undo the bookkeeping above */
		list_del(&evt->queue);
		del_timer(&evt->timer);

		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
		 * Firmware will send a CRQ with a transport event (0xFF) to
		 * tell this client what has happened to the transport. This
		 * will be handled in ibmvfc_handle_crq()
		 */
		if (rc == H_CLOSED) {
			if (printk_ratelimit())
				dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
			if (evt->cmnd)
				scsi_dma_unmap(evt->cmnd);
			ibmvfc_free_event(evt);
			return SCSI_MLQUEUE_HOST_BUSY;
		}

		dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
		if (evt->cmnd) {
			/* Fail the SCSI command via the eh done routine */
			evt->cmnd->result = DID_ERROR << 16;
			evt->done = ibmvfc_scsi_eh_done;
		} else
			evt->xfer_iu->mad_common.status = IBMVFC_MAD_CRQ_ERROR;

		evt->done(evt);
	} else
		ibmvfc_trc_start(evt);

	return 0;
}
1385
/**
 * ibmvfc_log_error - Log an error for the failed command if appropriate
 * @evt:	ibmvfc event to log
 *
 * Looks up a human readable description of the command's status/error
 * pair and prints it with FCP response details. Errors the table does
 * not flag for logging are only printed at elevated log levels.
 **/
static void ibmvfc_log_error(struct ibmvfc_event *evt)
{
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
	struct scsi_cmnd *cmnd = evt->cmnd;
	const char *err = unknown_error;
	int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
	int logerr = 0;
	int rsp_code = 0;

	if (index >= 0) {
		logerr = cmd_status[index].log;
		err = cmd_status[index].name;
	}

	/* Skip chatter unless the table says to log it or logging is verbose */
	if (!logerr && (vhost->log_level <= IBMVFC_DEFAULT_LOG_LEVEL))
		return;

	if (rsp->flags & FCP_RSP_LEN_VALID)
		rsp_code = rsp->data.info.rsp_code;

	scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
		    cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
}
1418
/**
 * ibmvfc_scsi_done - Handle responses from commands
 * @evt:	ibmvfc event to be handled
 *
 * Used as a callback when sending scsi cmds. Copies residual count,
 * sense data, and result into the scsi command, completes it, and
 * returns the event to the free pool.
 **/
static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
	struct scsi_cmnd *cmnd = evt->cmnd;
	int rsp_len = 0;
	int sense_len = rsp->fcp_sense_len;

	if (cmnd) {
		/* Prefer the adapter's residual count when it is valid */
		if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID)
			scsi_set_resid(cmnd, vfc_cmd->adapter_resid);
		else if (rsp->flags & FCP_RESID_UNDER)
			scsi_set_resid(cmnd, rsp->fcp_resid);
		else
			scsi_set_resid(cmnd, 0);

		if (vfc_cmd->status) {
			cmnd->result = ibmvfc_get_err_result(vfc_cmd);

			/* Sense data follows the FCP response data in the rsp
			 * buffer; clamp the copy to the midlayer sense buffer */
			if (rsp->flags & FCP_RSP_LEN_VALID)
				rsp_len = rsp->fcp_rsp_len;
			if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len)
				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);

			ibmvfc_log_error(evt);
		}

		/* Transferring less than the command's underflow is an error */
		if (!cmnd->result &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
			cmnd->result = (DID_ERROR << 16);

		scsi_dma_unmap(cmnd);
		cmnd->scsi_done(cmnd);
	}

	ibmvfc_free_event(evt);
}
1464
1465/**
1466 * ibmvfc_host_chkready - Check if the host can accept commands
1467 * @vhost: struct ibmvfc host
1468 *
1469 * Returns:
1470 * 1 if host can accept command / 0 if not
1471 **/
1472static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1473{
1474 int result = 0;
1475
1476 switch (vhost->state) {
1477 case IBMVFC_LINK_DEAD:
1478 case IBMVFC_HOST_OFFLINE:
1479 result = DID_NO_CONNECT << 16;
1480 break;
1481 case IBMVFC_NO_CRQ:
1482 case IBMVFC_INITIALIZING:
1483 case IBMVFC_HALTED:
1484 case IBMVFC_LINK_DOWN:
1485 result = DID_REQUEUE << 16;
1486 break;
1487 case IBMVFC_ACTIVE:
1488 result = 0;
1489 break;
1490 };
1491
1492 return result;
1493}
1494
/**
 * ibmvfc_queuecommand - The queuecommand function of the scsi template
 * @cmnd:	struct scsi_cmnd to be executed
 * @done:	Callback function to be called when cmnd is completed
 *
 * Builds an ibmvfc_cmd from the scsi command and sends it to the
 * virtual I/O server. DMA mapping failures are retried via
 * SCSI_MLQUEUE_HOST_BUSY (for -ENOMEM) or completed with DID_ERROR.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd,
			       void (*done) (struct scsi_cmnd *))
{
	struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	struct ibmvfc_cmd *vfc_cmd;
	struct ibmvfc_event *evt;
	u8 tag[2];
	int rc;

	/* Complete immediately if the rport or host can't take commands */
	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
		cmnd->result = rc;
		done(cmnd);
		return 0;
	}

	cmnd->result = (DID_OK << 16);
	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
	evt->cmnd = cmnd;
	cmnd->scsi_done = done;
	vfc_cmd = &evt->iu.cmd;
	memset(vfc_cmd, 0, sizeof(*vfc_cmd));
	/* The response buffer lives in the same IU, right after the command */
	vfc_cmd->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
	vfc_cmd->resp.len = sizeof(vfc_cmd->rsp);
	vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE;
	vfc_cmd->payload_len = sizeof(vfc_cmd->iu);
	vfc_cmd->resp_len = sizeof(vfc_cmd->rsp);
	vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata;
	vfc_cmd->tgt_scsi_id = rport->port_id;
	/* Only request class 3 error recovery when both ends support it */
	if ((rport->supported_classes & FC_COS_CLASS3) &&
	    (fc_host_supported_classes(vhost->host) & FC_COS_CLASS3))
		vfc_cmd->flags = IBMVFC_CLASS_3_ERR;
	vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd);
	int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
	memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);

	/* Translate midlayer queue tag messages into FCP task attributes */
	if (scsi_populate_tag_msg(cmnd, tag)) {
		vfc_cmd->task_tag = tag[1];
		switch (tag[0]) {
		case MSG_SIMPLE_TAG:
			vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
			break;
		case MSG_HEAD_TAG:
			vfc_cmd->iu.pri_task_attr = IBMVFC_HEAD_OF_QUEUE;
			break;
		case MSG_ORDERED_TAG:
			vfc_cmd->iu.pri_task_attr = IBMVFC_ORDERED_TASK;
			break;
		};
	}

	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
		return ibmvfc_send_event(evt, vhost, 0);

	/* DMA mapping failed: return the event and fail or retry the command */
	ibmvfc_free_event(evt);
	if (rc == -ENOMEM)
		return SCSI_MLQUEUE_HOST_BUSY;

	if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
		scmd_printk(KERN_ERR, cmnd,
			    "Failed to map DMA buffer for command. rc=%d\n", rc);

	cmnd->result = DID_ERROR << 16;
	done(cmnd);
	return 0;
}
1571
1572/**
1573 * ibmvfc_sync_completion - Signal that a synchronous command has completed
1574 * @evt: ibmvfc event struct
1575 *
1576 **/
1577static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1578{
1579 /* copy the response back */
1580 if (evt->sync_iu)
1581 *evt->sync_iu = *evt->xfer_iu;
1582
1583 complete(&evt->comp);
1584}
1585
/**
 * ibmvfc_reset_device - Reset the device with the specified reset type
 * @sdev:	scsi device to reset
 * @type:	reset type
 * @desc:	reset type description for log messages
 *
 * Sends a task management command for the device and synchronously waits
 * for completion. The command is only issued while the host is active;
 * otherwise the function fails with -EIO.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_cmd *tmf;
	struct ibmvfc_event *evt;
	union ibmvfc_iu rsp_iu;
	struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
	int rsp_rc = -EBUSY;
	unsigned long flags;
	int rsp_code = 0;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	if (vhost->state == IBMVFC_ACTIVE) {
		evt = ibmvfc_get_event(vhost);
		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);

		/* Build a TMF command carrying no data descriptor */
		tmf = &evt->iu.cmd;
		memset(tmf, 0, sizeof(*tmf));
		tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
		tmf->resp.len = sizeof(tmf->rsp);
		tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
		tmf->payload_len = sizeof(tmf->iu);
		tmf->resp_len = sizeof(tmf->rsp);
		tmf->cancel_key = (unsigned long)sdev->hostdata;
		tmf->tgt_scsi_id = rport->port_id;
		int_to_scsilun(sdev->lun, &tmf->iu.lun);
		tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
		tmf->iu.tmf_flags = type;
		evt->sync_iu = &rsp_iu;

		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
			    desc, rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
	wait_for_completion(&evt->comp);

	if (rsp_iu.cmd.status) {
		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
			rsp_code = fc_rsp->data.info.rsp_code;

		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
			    desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
			    rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
			    fc_rsp->scsi_status);
		rsp_rc = -EIO;
	} else
		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);

	/* Event pool operations are done under the host lock */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return rsp_rc;
}
1658
/**
 * ibmvfc_abort_task_set - Abort outstanding commands to the device
 * @sdev:	scsi device to abort commands
 *
 * This sends an Abort Task Set to the VIOS for the specified device. This does
 * NOT send any cancel to the VIOS. That must be done separately.
 *
 * Returns:
 *	0 on success (including when there was nothing to abort) / other on failure
 **/
static int ibmvfc_abort_task_set(struct scsi_device *sdev)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_cmd *tmf;
	struct ibmvfc_event *evt, *found_evt;
	union ibmvfc_iu rsp_iu;
	struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
	int rsp_rc = -EBUSY;
	unsigned long flags;
	int rsp_code = 0;

	/* Only bother sending the TMF if something is actually outstanding
	 * for this device */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	found_evt = NULL;
	list_for_each_entry(evt, &vhost->sent, queue) {
		if (evt->cmnd && evt->cmnd->device == sdev) {
			found_evt = evt;
			break;
		}
	}

	if (!found_evt) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	if (vhost->state == IBMVFC_ACTIVE) {
		evt = ibmvfc_get_event(vhost);
		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);

		/* Build an ABORT TASK SET TMF with no data descriptor */
		tmf = &evt->iu.cmd;
		memset(tmf, 0, sizeof(*tmf));
		tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
		tmf->resp.len = sizeof(tmf->rsp);
		tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
		tmf->payload_len = sizeof(tmf->iu);
		tmf->resp_len = sizeof(tmf->rsp);
		tmf->cancel_key = (unsigned long)sdev->hostdata;
		tmf->tgt_scsi_id = rport->port_id;
		int_to_scsilun(sdev->lun, &tmf->iu.lun);
		tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
		tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
		evt->sync_iu = &rsp_iu;

		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
	wait_for_completion(&evt->comp);

	if (rsp_iu.cmd.status) {
		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
			rsp_code = fc_rsp->data.info.rsp_code;

		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
			    ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
			    rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
			    fc_rsp->scsi_status);
		rsp_rc = -EIO;
	} else
		sdev_printk(KERN_INFO, sdev, "Abort successful\n");

	/* Event pool operations are done under the host lock */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return rsp_rc;
}
1747
/**
 * ibmvfc_cancel_all - Cancel all outstanding commands to the device
 * @sdev:	scsi device to cancel commands
 * @type:	type of error recovery being performed
 *
 * This sends a cancel to the VIOS for the specified device. This does
 * NOT send any abort to the actual device. That must be done separately.
 *
 * Returns:
 *	0 on success (including when there was nothing to cancel) / other on failure
 **/
static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_tmf *tmf;
	struct ibmvfc_event *evt, *found_evt;
	union ibmvfc_iu rsp;
	int rsp_rc = -EBUSY;
	unsigned long flags;
	u16 status;

	ENTER;
	/* Only bother sending the MAD if something is actually outstanding
	 * for this device */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	found_evt = NULL;
	list_for_each_entry(evt, &vhost->sent, queue) {
		if (evt->cmnd && evt->cmnd->device == sdev) {
			found_evt = evt;
			break;
		}
	}

	if (!found_evt) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	if (vhost->state == IBMVFC_ACTIVE) {
		evt = ibmvfc_get_event(vhost);
		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);

		/* Build a TMF MAD (management datagram), not an FCP command */
		tmf = &evt->iu.tmf;
		memset(tmf, 0, sizeof(*tmf));
		tmf->common.version = 1;
		tmf->common.opcode = IBMVFC_TMF_MAD;
		tmf->common.length = sizeof(*tmf);
		tmf->scsi_id = rport->port_id;
		int_to_scsilun(sdev->lun, &tmf->lun);
		tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
		tmf->cancel_key = (unsigned long)sdev->hostdata;
		tmf->my_cancel_key = (IBMVFC_TMF_CANCEL_KEY | (unsigned long)sdev->hostdata);

		evt->sync_iu = &rsp;
		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");

	wait_for_completion(&evt->comp);
	status = rsp.mad_common.status;
	/* Event pool operations are done under the host lock */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (status != IBMVFC_MAD_SUCCESS) {
		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
	return 0;
}
1830
/**
 * ibmvfc_eh_abort_handler - Abort a command
 * @cmd:	scsi command to abort
 *
 * Cancels and aborts all outstanding commands for the device, and on
 * success fails any events still on the sent list for that device with
 * DID_ABORT.
 *
 * Returns:
 *	SUCCESS / FAILED
 **/
static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
	struct ibmvfc_event *evt, *pos;
	int cancel_rc, abort_rc;
	unsigned long flags;

	ENTER;
	ibmvfc_wait_while_resetting(vhost);
	cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_ABORT_TASK_SET);
	abort_rc = ibmvfc_abort_task_set(cmd->device);

	if (!cancel_rc && !abort_rc) {
		/* Both succeeded: fail anything still outstanding for this device */
		spin_lock_irqsave(vhost->host->host_lock, flags);
		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
			if (evt->cmnd && evt->cmnd->device == cmd->device)
				ibmvfc_fail_request(evt, DID_ABORT);
		}
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		LEAVE;
		return SUCCESS;
	}

	LEAVE;
	return FAILED;
}
1864
/**
 * ibmvfc_eh_device_reset_handler - Reset a single LUN
 * @cmd:	scsi command struct
 *
 * Cancels outstanding commands and sends a LUN reset, and on success
 * fails any events still on the sent list for the device with DID_ABORT.
 *
 * Returns:
 *	SUCCESS / FAILED
 **/
static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
	struct ibmvfc_event *evt, *pos;
	int cancel_rc, reset_rc;
	unsigned long flags;

	ENTER;
	ibmvfc_wait_while_resetting(vhost);
	cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_LUN_RESET);
	reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_LUN_RESET, "LUN");

	if (!cancel_rc && !reset_rc) {
		/* Both succeeded: fail anything still outstanding for this device */
		spin_lock_irqsave(vhost->host->host_lock, flags);
		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
			if (evt->cmnd && evt->cmnd->device == cmd->device)
				ibmvfc_fail_request(evt, DID_ABORT);
		}
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		LEAVE;
		return SUCCESS;
	}

	LEAVE;
	return FAILED;
}
1898
1899/**
1900 * ibmvfc_dev_cancel_all - Device iterated cancel all function
1901 * @sdev: scsi device struct
1902 * @data: return code
1903 *
1904 **/
1905static void ibmvfc_dev_cancel_all(struct scsi_device *sdev, void *data)
1906{
1907 unsigned long *rc = data;
1908 *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
1909}
1910
/**
 * ibmvfc_dev_abort_all - Device iterated abort task set function
 * @sdev:	scsi device struct
 * @data:	return code
 *
 * starget_for_each_device callback; ORs per-device failures together so
 * one bad device fails the whole target operation.
 **/
static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
{
	unsigned long *result = data;

	*result |= ibmvfc_abort_task_set(sdev);
}
1922
/**
 * ibmvfc_eh_target_reset_handler - Reset the target
 * @cmd:	scsi command struct
 *
 * Cancels outstanding commands on every device of the target, sends a
 * target reset, and on success fails any events still on the sent list
 * for the target with DID_ABORT.
 *
 * Returns:
 *	SUCCESS / FAILED
 **/
static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
	struct scsi_target *starget = scsi_target(cmd->device);
	struct ibmvfc_event *evt, *pos;
	int reset_rc;
	unsigned long cancel_rc = 0;
	unsigned long flags;

	ENTER;
	ibmvfc_wait_while_resetting(vhost);
	/* cancel_rc accumulates failures across all devices on the target */
	starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
	reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_TARGET_RESET, "target");

	if (!cancel_rc && !reset_rc) {
		/* Both succeeded: fail anything still outstanding for this target */
		spin_lock_irqsave(vhost->host->host_lock, flags);
		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
			if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
				ibmvfc_fail_request(evt, DID_ABORT);
		}
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		LEAVE;
		return SUCCESS;
	}

	LEAVE;
	return FAILED;
}
1958
1959/**
1960 * ibmvfc_eh_host_reset_handler - Reset the connection to the server
1961 * @cmd: struct scsi_cmnd having problems
1962 *
1963 **/
1964static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
1965{
1966 int rc;
1967 struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
1968
1969 dev_err(vhost->dev, "Resetting connection due to error recovery\n");
1970 rc = ibmvfc_issue_fc_host_lip(vhost->host);
1971 return rc ? FAILED : SUCCESS;
1972}
1973
/**
 * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
 * @rport:	rport struct
 *
 * Cancels and aborts all I/O on every device of the rport's target.
 * On success the matching sent events are failed with DID_ABORT;
 * otherwise the whole host connection is reset via a LIP. Finally the
 * target is unblocked.
 *
 * Return value:
 *	none
 **/
static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
{
	struct scsi_target *starget = to_scsi_target(&rport->dev);
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ibmvfc_host *vhost = shost_priv(shost);
	struct ibmvfc_event *evt, *pos;
	unsigned long cancel_rc = 0;
	unsigned long abort_rc = 0;
	unsigned long flags;

	ENTER;
	/* rc accumulators collect failures across all target devices */
	starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
	starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);

	if (!cancel_rc && !abort_rc) {
		spin_lock_irqsave(shost->host_lock, flags);
		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
			if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
				ibmvfc_fail_request(evt, DID_ABORT);
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	} else
		/* Could not clean up per-device; reset the whole connection */
		ibmvfc_issue_fc_host_lip(shost);

	scsi_target_unblock(&rport->dev);
	LEAVE;
}
2008
/* Table mapping async event codes to human readable descriptions;
 * searched linearly by ibmvfc_get_ae_desc() */
static const struct {
	enum ibmvfc_async_event ae;
	const char *desc;
} ae_desc [] = {
	{ IBMVFC_AE_ELS_PLOGI, "PLOGI" },
	{ IBMVFC_AE_ELS_LOGO, "LOGO" },
	{ IBMVFC_AE_ELS_PRLO, "PRLO" },
	{ IBMVFC_AE_SCN_NPORT, "N-Port SCN" },
	{ IBMVFC_AE_SCN_GROUP, "Group SCN" },
	{ IBMVFC_AE_SCN_DOMAIN, "Domain SCN" },
	{ IBMVFC_AE_SCN_FABRIC, "Fabric SCN" },
	{ IBMVFC_AE_LINK_UP, "Link Up" },
	{ IBMVFC_AE_LINK_DOWN, "Link Down" },
	{ IBMVFC_AE_LINK_DEAD, "Link Dead" },
	{ IBMVFC_AE_HALT, "Halt" },
	{ IBMVFC_AE_RESUME, "Resume" },
	{ IBMVFC_AE_ADAPTER_FAILED, "Adapter Failed" },
};

/* Fallback description for event codes not present in ae_desc */
static const char *unknown_ae = "Unknown async";
2029
2030/**
2031 * ibmvfc_get_ae_desc - Get text description for async event
2032 * @ae: async event
2033 *
2034 **/
2035static const char *ibmvfc_get_ae_desc(u64 ae)
2036{
2037 int i;
2038
2039 for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
2040 if (ae_desc[i].ae == ae)
2041 return ae_desc[i].desc;
2042
2043 return unknown_ae;
2044}
2045
/**
 * ibmvfc_handle_async - Handle an async event from the adapter
 * @crq:	crq to process
 * @vhost:	ibmvfc host struct
 *
 **/
static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
				struct ibmvfc_host *vhost)
{
	const char *desc = ibmvfc_get_ae_desc(crq->event);

	ibmvfc_log(vhost, 2, "%s event received\n", desc);

	switch (crq->event) {
	case IBMVFC_AE_LINK_UP:
	case IBMVFC_AE_RESUME:
		/* Link (re)established: perform full host initialization */
		vhost->events_to_log |= IBMVFC_AE_LINKUP;
		ibmvfc_init_host(vhost);
		break;
	case IBMVFC_AE_SCN_FABRIC:
		/* Fabric-wide state change: treated like a fresh link */
		vhost->events_to_log |= IBMVFC_AE_RSCN;
		ibmvfc_init_host(vhost);
		break;
	case IBMVFC_AE_SCN_NPORT:
	case IBMVFC_AE_SCN_GROUP:
	case IBMVFC_AE_SCN_DOMAIN:
		vhost->events_to_log |= IBMVFC_AE_RSCN;
		/* fallthrough: narrower-scope SCNs, like the ELS events
		 * below, only need target rediscovery, not a full init */
	case IBMVFC_AE_ELS_LOGO:
	case IBMVFC_AE_ELS_PRLO:
	case IBMVFC_AE_ELS_PLOGI:
		ibmvfc_reinit_host(vhost);
		break;
	case IBMVFC_AE_LINK_DOWN:
	case IBMVFC_AE_ADAPTER_FAILED:
		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
		break;
	case IBMVFC_AE_LINK_DEAD:
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		break;
	case IBMVFC_AE_HALT:
		ibmvfc_link_down(vhost, IBMVFC_HALTED);
		break;
	default:
		dev_err(vhost->dev, "Unknown async event received: %ld\n", crq->event);
		break;
	};
}
2093
/**
 * ibmvfc_handle_crq - Handles and frees received events in the CRQ
 * @crq:	Command/Response queue
 * @vhost:	ibmvfc host struct
 *
 **/
static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
{
	long rc;
	/* For command responses, ioba carries the correlation token: the
	 * address of the originating event struct (validated below before
	 * being dereferenced). */
	struct ibmvfc_event *evt = (struct ibmvfc_event *)crq->ioba;

	switch (crq->valid) {
	case IBMVFC_CRQ_INIT_RSP:
		switch (crq->format) {
		case IBMVFC_CRQ_INIT:
			dev_info(vhost->dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvfc_send_crq_init_complete(vhost);
			if (rc == 0)
				ibmvfc_init_host(vhost);
			else
				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
			break;
		case IBMVFC_CRQ_INIT_COMPLETE:
			dev_info(vhost->dev, "Partner initialization complete\n");
			ibmvfc_init_host(vhost);
			break;
		default:
			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
		}
		return;
	case IBMVFC_CRQ_XPORT_EVENT:
		/* Transport-level event: the connection to the partner is
		 * gone; drop all host state and attempt recovery. */
		vhost->state = IBMVFC_NO_CRQ;
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
			/* We need to re-setup the interpartition connection */
			dev_info(vhost->dev, "Re-enabling adapter\n");
			vhost->client_migrated = 1;
			/* Requeue (not fail) outstanding I/O: migration is benign */
			ibmvfc_purge_requests(vhost, DID_REQUEUE);
			if ((rc = ibmvfc_reenable_crq_queue(vhost)) ||
			    (rc = ibmvfc_send_crq_init(vhost))) {
				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
				dev_err(vhost->dev, "Error after enable (rc=%ld)\n", rc);
			} else
				ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
		} else {
			dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);

			ibmvfc_purge_requests(vhost, DID_ERROR);
			if ((rc = ibmvfc_reset_crq(vhost)) ||
			    (rc = ibmvfc_send_crq_init(vhost))) {
				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
				dev_err(vhost->dev, "Error after reset (rc=%ld)\n", rc);
			} else
				ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
		}
		return;
	case IBMVFC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
		return;
	}

	if (crq->format == IBMVFC_ASYNC_EVENT)
		return;

	/* The only kind of payload CRQs we should get are responses to
	 * things we send. Make sure this response is to something we
	 * actually sent
	 */
	if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
		dev_err(vhost->dev, "Returned correlation_token 0x%08lx is invalid!\n",
			crq->ioba);
		return;
	}

	if (unlikely(atomic_read(&evt->free))) {
		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08lx!\n",
			crq->ioba);
		return;
	}

	/* Valid, live event: stop its timeout timer, unlink it from the
	 * sent list and invoke the completion handler. */
	del_timer(&evt->timer);
	list_del(&evt->queue);
	ibmvfc_trc_end(evt);
	evt->done(evt);
}
2182
2183/**
2184 * ibmvfc_scan_finished - Check if the device scan is done.
2185 * @shost: scsi host struct
2186 * @time: current elapsed time
2187 *
2188 * Returns:
2189 * 0 if scan is not done / 1 if scan is done
2190 **/
2191static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2192{
2193 unsigned long flags;
2194 struct ibmvfc_host *vhost = shost_priv(shost);
2195 int done = 0;
2196
2197 spin_lock_irqsave(shost->host_lock, flags);
2198 if (time >= (init_timeout * HZ)) {
2199 dev_info(vhost->dev, "Scan taking longer than %d seconds, "
2200 "continuing initialization\n", init_timeout);
2201 done = 1;
2202 }
2203
2204 if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE)
2205 done = 1;
2206 spin_unlock_irqrestore(shost->host_lock, flags);
2207 return done;
2208}
2209
2210/**
2211 * ibmvfc_slave_alloc - Setup the device's task set value
2212 * @sdev: struct scsi_device device to configure
2213 *
2214 * Set the device's task set value so that error handling works as
2215 * expected.
2216 *
2217 * Returns:
2218 * 0 on success / -ENXIO if device does not exist
2219 **/
2220static int ibmvfc_slave_alloc(struct scsi_device *sdev)
2221{
2222 struct Scsi_Host *shost = sdev->host;
2223 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2224 struct ibmvfc_host *vhost = shost_priv(shost);
2225 unsigned long flags = 0;
2226
2227 if (!rport || fc_remote_port_chkready(rport))
2228 return -ENXIO;
2229
2230 spin_lock_irqsave(shost->host_lock, flags);
2231 sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
2232 spin_unlock_irqrestore(shost->host_lock, flags);
2233 return 0;
2234}
2235
2236/**
2237 * ibmvfc_slave_configure - Configure the device
2238 * @sdev: struct scsi_device device to configure
2239 *
2240 * Enable allow_restart for a device if it is a disk. Adjust the
2241 * queue_depth here also.
2242 *
2243 * Returns:
2244 * 0
2245 **/
2246static int ibmvfc_slave_configure(struct scsi_device *sdev)
2247{
2248 struct Scsi_Host *shost = sdev->host;
2249 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
2250 unsigned long flags = 0;
2251
2252 spin_lock_irqsave(shost->host_lock, flags);
2253 if (sdev->type == TYPE_DISK)
2254 sdev->allow_restart = 1;
2255
2256 if (sdev->tagged_supported) {
2257 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
2258 scsi_activate_tcq(sdev, sdev->queue_depth);
2259 } else
2260 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2261
2262 rport->dev_loss_tmo = dev_loss_tmo;
2263 spin_unlock_irqrestore(shost->host_lock, flags);
2264 return 0;
2265}
2266
2267/**
2268 * ibmvfc_change_queue_depth - Change the device's queue depth
2269 * @sdev: scsi device struct
2270 * @qdepth: depth to set
2271 *
2272 * Return value:
2273 * actual depth set
2274 **/
2275static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
2276{
2277 if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
2278 qdepth = IBMVFC_MAX_CMDS_PER_LUN;
2279
2280 scsi_adjust_queue_depth(sdev, 0, qdepth);
2281 return sdev->queue_depth;
2282}
2283
2284/**
2285 * ibmvfc_change_queue_type - Change the device's queue type
2286 * @sdev: scsi device struct
2287 * @tag_type: type of tags to use
2288 *
2289 * Return value:
2290 * actual queue type set
2291 **/
2292static int ibmvfc_change_queue_type(struct scsi_device *sdev, int tag_type)
2293{
2294 if (sdev->tagged_supported) {
2295 scsi_set_tag_type(sdev, tag_type);
2296
2297 if (tag_type)
2298 scsi_activate_tcq(sdev, sdev->queue_depth);
2299 else
2300 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2301 } else
2302 tag_type = 0;
2303
2304 return tag_type;
2305}
2306
2307static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
2308 struct device_attribute *attr, char *buf)
2309{
2310 struct Scsi_Host *shost = class_to_shost(dev);
2311 struct ibmvfc_host *vhost = shost_priv(shost);
2312
2313 return snprintf(buf, PAGE_SIZE, "%s\n",
2314 vhost->login_buf->resp.partition_name);
2315}
2316
2317static struct device_attribute ibmvfc_host_partition_name = {
2318 .attr = {
2319 .name = "partition_name",
2320 .mode = S_IRUGO,
2321 },
2322 .show = ibmvfc_show_host_partition_name,
2323};
2324
2325static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2326 struct device_attribute *attr, char *buf)
2327{
2328 struct Scsi_Host *shost = class_to_shost(dev);
2329 struct ibmvfc_host *vhost = shost_priv(shost);
2330
2331 return snprintf(buf, PAGE_SIZE, "%s\n",
2332 vhost->login_buf->resp.device_name);
2333}
2334
2335static struct device_attribute ibmvfc_host_device_name = {
2336 .attr = {
2337 .name = "device_name",
2338 .mode = S_IRUGO,
2339 },
2340 .show = ibmvfc_show_host_device_name,
2341};
2342
2343static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2344 struct device_attribute *attr, char *buf)
2345{
2346 struct Scsi_Host *shost = class_to_shost(dev);
2347 struct ibmvfc_host *vhost = shost_priv(shost);
2348
2349 return snprintf(buf, PAGE_SIZE, "%s\n",
2350 vhost->login_buf->resp.port_loc_code);
2351}
2352
2353static struct device_attribute ibmvfc_host_loc_code = {
2354 .attr = {
2355 .name = "port_loc_code",
2356 .mode = S_IRUGO,
2357 },
2358 .show = ibmvfc_show_host_loc_code,
2359};
2360
2361static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2362 struct device_attribute *attr, char *buf)
2363{
2364 struct Scsi_Host *shost = class_to_shost(dev);
2365 struct ibmvfc_host *vhost = shost_priv(shost);
2366
2367 return snprintf(buf, PAGE_SIZE, "%s\n",
2368 vhost->login_buf->resp.drc_name);
2369}
2370
2371static struct device_attribute ibmvfc_host_drc_name = {
2372 .attr = {
2373 .name = "drc_name",
2374 .mode = S_IRUGO,
2375 },
2376 .show = ibmvfc_show_host_drc_name,
2377};
2378
2379static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2380 struct device_attribute *attr, char *buf)
2381{
2382 struct Scsi_Host *shost = class_to_shost(dev);
2383 struct ibmvfc_host *vhost = shost_priv(shost);
2384 return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
2385}
2386
2387static struct device_attribute ibmvfc_host_npiv_version = {
2388 .attr = {
2389 .name = "npiv_version",
2390 .mode = S_IRUGO,
2391 },
2392 .show = ibmvfc_show_host_npiv_version,
2393};
2394
2395/**
2396 * ibmvfc_show_log_level - Show the adapter's error logging level
2397 * @dev: class device struct
2398 * @buf: buffer
2399 *
2400 * Return value:
2401 * number of bytes printed to buffer
2402 **/
2403static ssize_t ibmvfc_show_log_level(struct device *dev,
2404 struct device_attribute *attr, char *buf)
2405{
2406 struct Scsi_Host *shost = class_to_shost(dev);
2407 struct ibmvfc_host *vhost = shost_priv(shost);
2408 unsigned long flags = 0;
2409 int len;
2410
2411 spin_lock_irqsave(shost->host_lock, flags);
2412 len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
2413 spin_unlock_irqrestore(shost->host_lock, flags);
2414 return len;
2415}
2416
2417/**
2418 * ibmvfc_store_log_level - Change the adapter's error logging level
2419 * @dev: class device struct
2420 * @buf: buffer
2421 *
2422 * Return value:
2423 * number of bytes printed to buffer
2424 **/
2425static ssize_t ibmvfc_store_log_level(struct device *dev,
2426 struct device_attribute *attr,
2427 const char *buf, size_t count)
2428{
2429 struct Scsi_Host *shost = class_to_shost(dev);
2430 struct ibmvfc_host *vhost = shost_priv(shost);
2431 unsigned long flags = 0;
2432
2433 spin_lock_irqsave(shost->host_lock, flags);
2434 vhost->log_level = simple_strtoul(buf, NULL, 10);
2435 spin_unlock_irqrestore(shost->host_lock, flags);
2436 return strlen(buf);
2437}
2438
2439static struct device_attribute ibmvfc_log_level_attr = {
2440 .attr = {
2441 .name = "log_level",
2442 .mode = S_IRUGO | S_IWUSR,
2443 },
2444 .show = ibmvfc_show_log_level,
2445 .store = ibmvfc_store_log_level
2446};
2447
2448#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_read_trace - Dump the adapter trace
 * @kobj:		kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ibmvfc_read_trace(struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags = 0;
	int size = IBMVFC_TRACE_SIZE;
	char *src = (char *)vhost->trace;

	/* Reads beyond the end of the trace buffer return EOF */
	if (off > size)
		return 0;
	/* Clip the read so it never runs past the end of the buffer */
	if (off + count > size) {
		size -= off;
		count = size;
	}

	/* Copy under host_lock so entries are not updated mid-copy */
	spin_lock_irqsave(shost->host_lock, flags);
	memcpy(buf, &src[off], count);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return count;
}
2483
/* sysfs binary attribute exposing the adapter trace buffer (read-only);
 * size 0 means "unlimited" — ibmvfc_read_trace bounds each read itself. */
static struct bin_attribute ibmvfc_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ibmvfc_read_trace,
};
2492#endif
2493
/* NULL-terminated list of host sysfs attributes, hooked into
 * driver_template.shost_attrs below. */
static struct device_attribute *ibmvfc_attrs[] = {
	&ibmvfc_host_partition_name,
	&ibmvfc_host_device_name,
	&ibmvfc_host_loc_code,
	&ibmvfc_host_drc_name,
	&ibmvfc_host_npiv_version,
	&ibmvfc_log_level_attr,
	NULL
};
2503
/* SCSI midlayer host template: wires the ibmvfc entry points and limits
 * into the SCSI core. */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual FC Adapter",
	.proc_name = IBMVFC_NAME,
	.queuecommand = ibmvfc_queuecommand,
	.eh_abort_handler = ibmvfc_eh_abort_handler,
	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
	.slave_alloc = ibmvfc_slave_alloc,
	.slave_configure = ibmvfc_slave_configure,
	.scan_finished = ibmvfc_scan_finished,
	.change_queue_depth = ibmvfc_change_queue_depth,
	.change_queue_type = ibmvfc_change_queue_type,
	.cmd_per_lun = 16,
	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.max_sectors = IBMVFC_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ibmvfc_attrs,
};
2526
2527/**
2528 * ibmvfc_next_async_crq - Returns the next entry in async queue
2529 * @vhost: ibmvfc host struct
2530 *
2531 * Returns:
2532 * Pointer to next entry in queue / NULL if empty
2533 **/
2534static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
2535{
2536 struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
2537 struct ibmvfc_async_crq *crq;
2538
2539 crq = &async_crq->msgs[async_crq->cur];
2540 if (crq->valid & 0x80) {
2541 if (++async_crq->cur == async_crq->size)
2542 async_crq->cur = 0;
2543 } else
2544 crq = NULL;
2545
2546 return crq;
2547}
2548
2549/**
2550 * ibmvfc_next_crq - Returns the next entry in message queue
2551 * @vhost: ibmvfc host struct
2552 *
2553 * Returns:
2554 * Pointer to next entry in queue / NULL if empty
2555 **/
2556static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
2557{
2558 struct ibmvfc_crq_queue *queue = &vhost->crq;
2559 struct ibmvfc_crq *crq;
2560
2561 crq = &queue->msgs[queue->cur];
2562 if (crq->valid & 0x80) {
2563 if (++queue->cur == queue->size)
2564 queue->cur = 0;
2565 } else
2566 crq = NULL;
2567
2568 return crq;
2569}
2570
2571/**
2572 * ibmvfc_interrupt - Interrupt handler
2573 * @irq: number of irq to handle, not used
2574 * @dev_instance: ibmvfc_host that received interrupt
2575 *
2576 * Returns:
2577 * IRQ_HANDLED
2578 **/
2579static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
2580{
2581 struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
2582 struct vio_dev *vdev = to_vio_dev(vhost->dev);
2583 struct ibmvfc_crq *crq;
2584 struct ibmvfc_async_crq *async;
2585 unsigned long flags;
2586 int done = 0;
2587
2588 spin_lock_irqsave(vhost->host->host_lock, flags);
2589 vio_disable_interrupts(to_vio_dev(vhost->dev));
2590 while (!done) {
2591 /* Pull all the valid messages off the CRQ */
2592 while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
2593 ibmvfc_handle_crq(crq, vhost);
2594 crq->valid = 0;
2595 }
2596
2597 /* Pull all the valid messages off the async CRQ */
2598 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
2599 ibmvfc_handle_async(async, vhost);
2600 async->valid = 0;
2601 }
2602
2603 vio_enable_interrupts(vdev);
2604 if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
2605 vio_disable_interrupts(vdev);
2606 ibmvfc_handle_crq(crq, vhost);
2607 crq->valid = 0;
2608 } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
2609 vio_disable_interrupts(vdev);
2610 ibmvfc_handle_async(async, vhost);
2611 crq->valid = 0;
2612 } else
2613 done = 1;
2614 }
2615
2616 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2617 return IRQ_HANDLED;
2618}
2619
/**
 * ibmvfc_init_tgt - Set the next init job step for the target
 * @tgt:		ibmvfc target struct
 * @job_step:	job step to perform
 *
 **/
static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
			    void (*job_step) (struct ibmvfc_target *))
{
	/* Mark the target as needing initialization, record which step to
	 * run, then poke the host work thread to pick it up. */
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
	tgt->job_step = job_step;
	wake_up(&tgt->vhost->work_wait_q);
}
2633
2634/**
2635 * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
2636 * @tgt: ibmvfc target struct
2637 * @job_step: initialization job step
2638 *
2639 **/
2640static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
2641 void (*job_step) (struct ibmvfc_target *))
2642{
2643 if (++tgt->init_retries > IBMVFC_MAX_INIT_RETRIES) {
2644 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2645 wake_up(&tgt->vhost->work_wait_q);
2646 } else
2647 ibmvfc_init_tgt(tgt, job_step);
2648}
2649
2650/**
2651 * ibmvfc_release_tgt - Free memory allocated for a target
2652 * @kref: kref struct
2653 *
2654 **/
2655static void ibmvfc_release_tgt(struct kref *kref)
2656{
2657 struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
2658 kfree(tgt);
2659}
2660
/**
 * ibmvfc_tgt_prli_done - Completion handler for Process Login
 * @evt:	ibmvfc event struct
 *
 **/
static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
	u32 status = rsp->common.status;

	/* This completion releases the discovery slot taken in send_prli */
	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Process Login succeeded\n");
		/* Login complete: the rport can now be added to the transport */
		tgt->need_login = 0;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		/* Driver-side failure: host-level recovery handles it */
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(rsp->status, rsp->error),
			rsp->status, rsp->error, status);
		/* Retry only errors classified as transient */
		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
		break;
	};

	/* Drop the reference taken when the PRLI was sent */
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
2700
2701/**
2702 * ibmvfc_tgt_send_prli - Send a process login
2703 * @tgt: ibmvfc target struct
2704 *
2705 **/
2706static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
2707{
2708 struct ibmvfc_process_login *prli;
2709 struct ibmvfc_host *vhost = tgt->vhost;
2710 struct ibmvfc_event *evt;
2711
2712 if (vhost->discovery_threads >= disc_threads)
2713 return;
2714
2715 kref_get(&tgt->kref);
2716 evt = ibmvfc_get_event(vhost);
2717 vhost->discovery_threads++;
2718 ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
2719 evt->tgt = tgt;
2720 prli = &evt->iu.prli;
2721 memset(prli, 0, sizeof(*prli));
2722 prli->common.version = 1;
2723 prli->common.opcode = IBMVFC_PROCESS_LOGIN;
2724 prli->common.length = sizeof(*prli);
2725 prli->scsi_id = tgt->scsi_id;
2726
2727 prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
2728 prli->parms.flags = IBMVFC_PRLI_EST_IMG_PAIR;
2729 prli->parms.service_parms = IBMVFC_PRLI_INITIATOR_FUNC;
2730
2731 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
2732 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
2733 vhost->discovery_threads--;
2734 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2735 kref_put(&tgt->kref, ibmvfc_release_tgt);
2736 } else
2737 tgt_dbg(tgt, "Sent process login\n");
2738}
2739
/**
 * ibmvfc_tgt_plogi_done - Completion handler for Port Login
 * @evt:	ibmvfc event struct
 *
 **/
static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
	u32 status = rsp->common.status;

	/* This completion releases the discovery slot taken in send_plogi */
	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Port Login succeeded\n");
		/* If the WWPN behind this SCSI ID changed, the target list is
		 * stale: request a full rediscovery instead of proceeding. */
		if (tgt->ids.port_name &&
		    tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
			vhost->reinit = 1;
			tgt_dbg(tgt, "Port re-init required\n");
			break;
		}
		/* Cache the identifiers and service parameters for the rport */
		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
		tgt->ids.port_id = tgt->scsi_id;
		tgt->ids.roles = FC_PORT_ROLE_FCP_TARGET;
		memcpy(&tgt->service_parms, &rsp->service_parms,
		       sizeof(tgt->service_parms));
		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
		       sizeof(tgt->service_parms_change));
		/* Next step in the login sequence: process login (PRLI) */
		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
			ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
			ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);

		/* Retry only errors classified as transient */
		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		break;
	};

	/* Drop the reference taken when the PLOGI was sent */
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
2794
2795/**
2796 * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
2797 * @tgt: ibmvfc target struct
2798 *
2799 **/
2800static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
2801{
2802 struct ibmvfc_port_login *plogi;
2803 struct ibmvfc_host *vhost = tgt->vhost;
2804 struct ibmvfc_event *evt;
2805
2806 if (vhost->discovery_threads >= disc_threads)
2807 return;
2808
2809 kref_get(&tgt->kref);
2810 evt = ibmvfc_get_event(vhost);
2811 vhost->discovery_threads++;
2812 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
2813 ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
2814 evt->tgt = tgt;
2815 plogi = &evt->iu.plogi;
2816 memset(plogi, 0, sizeof(*plogi));
2817 plogi->common.version = 1;
2818 plogi->common.opcode = IBMVFC_PORT_LOGIN;
2819 plogi->common.length = sizeof(*plogi);
2820 plogi->scsi_id = tgt->scsi_id;
2821
2822 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
2823 vhost->discovery_threads--;
2824 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2825 kref_put(&tgt->kref, ibmvfc_release_tgt);
2826 } else
2827 tgt_dbg(tgt, "Sent port login\n");
2828}
2829
/**
 * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
 * @evt:	ibmvfc event struct
 *
 **/
static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
	u32 status = rsp->common.status;

	vhost->discovery_threads--;
	/* NOTE: status was copied out above; evt (and the rsp it points
	 * into) must not be touched after this free. */
	ibmvfc_free_event(evt);
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);

	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Implicit Logout succeeded\n");
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		/* Driver failure: drop the reference and bail without
		 * scheduling any follow-up step for this target */
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		wake_up(&vhost->work_wait_q);
		return;
	case IBMVFC_MAD_FAILED:
	default:
		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
		break;
	};

	/* During target init, logout is followed by a fresh PLOGI. During a
	 * re-query, a target whose SCSI ID moved is deleted instead. */
	if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
	else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
		 tgt->scsi_id != tgt->new_scsi_id)
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	wake_up(&vhost->work_wait_q);
}
2868
2869/**
2870 * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
2871 * @tgt: ibmvfc target struct
2872 *
2873 **/
2874static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
2875{
2876 struct ibmvfc_implicit_logout *mad;
2877 struct ibmvfc_host *vhost = tgt->vhost;
2878 struct ibmvfc_event *evt;
2879
2880 if (vhost->discovery_threads >= disc_threads)
2881 return;
2882
2883 kref_get(&tgt->kref);
2884 evt = ibmvfc_get_event(vhost);
2885 vhost->discovery_threads++;
2886 ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
2887 evt->tgt = tgt;
2888 mad = &evt->iu.implicit_logout;
2889 memset(mad, 0, sizeof(*mad));
2890 mad->common.version = 1;
2891 mad->common.opcode = IBMVFC_IMPLICIT_LOGOUT;
2892 mad->common.length = sizeof(*mad);
2893 mad->old_scsi_id = tgt->scsi_id;
2894
2895 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
2896 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
2897 vhost->discovery_threads--;
2898 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2899 kref_put(&tgt->kref, ibmvfc_release_tgt);
2900 } else
2901 tgt_dbg(tgt, "Sent Implicit Logout\n");
2902}
2903
/**
 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
 * @evt:	ibmvfc event struct
 *
 **/
static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
	u32 status = rsp->common.status;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Query Target succeeded\n");
		tgt->new_scsi_id = rsp->scsi_id;
		/* SCSI ID changed: log the old ID out before re-login */
		if (rsp->scsi_id != tgt->scsi_id)
			ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
			ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
			ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);

		/* "Port name not registered" from the fabric means the target
		 * is gone: delete its rport rather than retrying */
		if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
		    rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
		    rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
		break;
	};

	/* Drop the reference taken when the query was sent */
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
2950
2951/**
2952 * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
2953 * @tgt: ibmvfc target struct
2954 *
2955 **/
2956static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
2957{
2958 struct ibmvfc_query_tgt *query_tgt;
2959 struct ibmvfc_host *vhost = tgt->vhost;
2960 struct ibmvfc_event *evt;
2961
2962 if (vhost->discovery_threads >= disc_threads)
2963 return;
2964
2965 kref_get(&tgt->kref);
2966 evt = ibmvfc_get_event(vhost);
2967 vhost->discovery_threads++;
2968 evt->tgt = tgt;
2969 ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
2970 query_tgt = &evt->iu.query_tgt;
2971 memset(query_tgt, 0, sizeof(*query_tgt));
2972 query_tgt->common.version = 1;
2973 query_tgt->common.opcode = IBMVFC_QUERY_TARGET;
2974 query_tgt->common.length = sizeof(*query_tgt);
2975 query_tgt->wwpn = tgt->ids.port_name;
2976
2977 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
2978 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
2979 vhost->discovery_threads--;
2980 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2981 kref_put(&tgt->kref, ibmvfc_release_tgt);
2982 } else
2983 tgt_dbg(tgt, "Sent Query Target\n");
2984}
2985
2986/**
2987 * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
2988 * @vhost: ibmvfc host struct
2989 * @scsi_id: SCSI ID to allocate target for
2990 *
2991 * Returns:
2992 * 0 on success / other on failure
2993 **/
2994static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
2995{
2996 struct ibmvfc_target *tgt;
2997 unsigned long flags;
2998
2999 spin_lock_irqsave(vhost->host->host_lock, flags);
3000 list_for_each_entry(tgt, &vhost->targets, queue) {
3001 if (tgt->scsi_id == scsi_id) {
3002 if (tgt->need_login)
3003 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
3004 goto unlock_out;
3005 }
3006 }
3007 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3008
3009 tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL);
3010 if (!tgt) {
3011 dev_err(vhost->dev, "Target allocation failure for scsi id %08lx\n",
3012 scsi_id);
3013 return -ENOMEM;
3014 }
3015
3016 tgt->scsi_id = scsi_id;
3017 tgt->new_scsi_id = scsi_id;
3018 tgt->vhost = vhost;
3019 tgt->need_login = 1;
3020 kref_init(&tgt->kref);
3021 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
3022 spin_lock_irqsave(vhost->host->host_lock, flags);
3023 list_add_tail(&tgt->queue, &vhost->targets);
3024
3025unlock_out:
3026 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3027 return 0;
3028}
3029
3030/**
3031 * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
3032 * @vhost: ibmvfc host struct
3033 *
3034 * Returns:
3035 * 0 on success / other on failure
3036 **/
3037static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
3038{
3039 int i, rc;
3040
3041 for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
3042 rc = ibmvfc_alloc_target(vhost,
3043 vhost->disc_buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK);
3044
3045 return rc;
3046}
3047
/**
 * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
 * @evt:	ibmvfc event struct
 *
 **/
static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
	u32 mad_status = rsp->common.status;

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
		/* disc_buf now holds num_written SCSI IDs; move the host
		 * state machine on to allocating targets for them */
		vhost->num_targets = rsp->num_written;
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
		break;
	case IBMVFC_MAD_FAILED:
		dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n",
			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
		ibmvfc_retry_host_init(vhost);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	default:
		/* Unrecognized status: assume the adapter is unusable */
		dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		break;
	}

	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3081
3082/**
3083 * ibmvfc_discover_targets - Send Discover Targets MAD
3084 * @vhost: ibmvfc host struct
3085 *
3086 **/
3087static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
3088{
3089 struct ibmvfc_discover_targets *mad;
3090 struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
3091
3092 ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
3093 mad = &evt->iu.discover_targets;
3094 memset(mad, 0, sizeof(*mad));
3095 mad->common.version = 1;
3096 mad->common.opcode = IBMVFC_DISC_TARGETS;
3097 mad->common.length = sizeof(*mad);
3098 mad->bufflen = vhost->disc_buf_sz;
3099 mad->buffer.va = vhost->disc_buf_dma;
3100 mad->buffer.len = vhost->disc_buf_sz;
3101 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
3102
3103 if (!ibmvfc_send_event(evt, vhost, default_timeout))
3104 ibmvfc_dbg(vhost, "Sent discover targets\n");
3105 else
3106 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3107}
3108
/**
 * ibmvfc_npiv_login_done - Completion handler for NPIV Login
 * @evt:	ibmvfc event struct
 *
 * On success, validates the login response (native FC support, queue
 * depth), publishes the adapter's FC attributes to the transport class,
 * and moves the host to the QUERY action. All error paths free the event
 * and either retry host init or declare the link dead.
 **/
static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	u32 mad_status = evt->xfer_iu->npiv_login.common.status;
	/* Response data lands in the host login buffer, not the event */
	struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
	unsigned int npiv_max_sectors;

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		ibmvfc_free_event(evt);
		break;
	case IBMVFC_MAD_FAILED:
		dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
			ibmvfc_retry_host_init(vhost);
		else
			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		ibmvfc_free_event(evt);
		return;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_host_init(vhost);
		/* fall through - free the event like DRIVER_FAILED */
	case IBMVFC_MAD_DRIVER_FAILED:
		ibmvfc_free_event(evt);
		return;
	default:
		dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		ibmvfc_free_event(evt);
		return;
	}

	/* A successful login means any prior migration has been handled */
	vhost->client_migrated = 0;

	if (!(rsp->flags & IBMVFC_NATIVE_FC)) {
		dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
			rsp->flags);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		wake_up(&vhost->work_wait_q);
		return;
	}

	/* Need headroom beyond the internal (ERP/init/discovery) requests */
	if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) {
		dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
			rsp->max_cmds);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		wake_up(&vhost->work_wait_q);
		return;
	}

	/* max_dma_len is in bytes; >> 9 converts to 512-byte sectors */
	npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
	dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
		 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
		 rsp->drc_name, npiv_max_sectors);

	/* Publish adapter identity/capabilities to the FC transport class */
	fc_host_fabric_name(vhost->host) = rsp->node_name;
	fc_host_node_name(vhost->host) = rsp->node_name;
	fc_host_port_name(vhost->host) = rsp->port_name;
	fc_host_port_id(vhost->host) = rsp->scsi_id;
	fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
	fc_host_supported_classes(vhost->host) = 0;
	/* High bit of word 0 of each class parm block = class supported */
	if (rsp->service_parms.class1_parms[0] & 0x80000000)
		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
	if (rsp->service_parms.class2_parms[0] & 0x80000000)
		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
	if (rsp->service_parms.class3_parms[0] & 0x80000000)
		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
	/* Low 12 bits of bb_rcv_sz hold the buffer-to-buffer receive size */
	fc_host_maxframe_size(vhost->host) =
		rsp->service_parms.common.bb_rcv_sz & 0x0fff;

	vhost->host->can_queue = rsp->max_cmds - IBMVFC_NUM_INTERNAL_REQ;
	vhost->host->max_sectors = npiv_max_sectors;
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
	wake_up(&vhost->work_wait_q);
}
3189
3190/**
3191 * ibmvfc_npiv_login - Sends NPIV login
3192 * @vhost: ibmvfc host struct
3193 *
3194 **/
3195static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
3196{
3197 struct ibmvfc_npiv_login_mad *mad;
3198 struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
3199
3200 ibmvfc_gather_partition_info(vhost);
3201 ibmvfc_set_login_info(vhost);
3202 ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
3203
3204 memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
3205 mad = &evt->iu.npiv_login;
3206 memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
3207 mad->common.version = 1;
3208 mad->common.opcode = IBMVFC_NPIV_LOGIN;
3209 mad->common.length = sizeof(struct ibmvfc_npiv_login_mad);
3210 mad->buffer.va = vhost->login_buf_dma;
3211 mad->buffer.len = sizeof(*vhost->login_buf);
3212
3213 memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
3214 vhost->async_crq.cur = 0;
3215 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
3216
3217 if (!ibmvfc_send_event(evt, vhost, default_timeout))
3218 ibmvfc_dbg(vhost, "Sent NPIV login\n");
3219 else
3220 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3221};
3222
3223/**
3224 * ibmvfc_dev_init_to_do - Is there target initialization work to do?
3225 * @vhost: ibmvfc host struct
3226 *
3227 * Returns:
3228 * 1 if work to do / 0 if not
3229 **/
3230static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
3231{
3232 struct ibmvfc_target *tgt;
3233
3234 list_for_each_entry(tgt, &vhost->targets, queue) {
3235 if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
3236 tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
3237 return 1;
3238 }
3239
3240 return 0;
3241}
3242
3243/**
3244 * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
3245 * @vhost: ibmvfc host struct
3246 *
3247 * Returns:
3248 * 1 if work to do / 0 if not
3249 **/
3250static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3251{
3252 struct ibmvfc_target *tgt;
3253
3254 if (kthread_should_stop())
3255 return 1;
3256 switch (vhost->action) {
3257 case IBMVFC_HOST_ACTION_NONE:
3258 case IBMVFC_HOST_ACTION_INIT_WAIT:
3259 return 0;
3260 case IBMVFC_HOST_ACTION_TGT_INIT:
3261 case IBMVFC_HOST_ACTION_QUERY_TGTS:
3262 if (vhost->discovery_threads == disc_threads)
3263 return 0;
3264 list_for_each_entry(tgt, &vhost->targets, queue)
3265 if (tgt->action == IBMVFC_TGT_ACTION_INIT)
3266 return 1;
3267 list_for_each_entry(tgt, &vhost->targets, queue)
3268 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
3269 return 0;
3270 return 1;
3271 case IBMVFC_HOST_ACTION_INIT:
3272 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
3273 case IBMVFC_HOST_ACTION_TGT_ADD:
3274 case IBMVFC_HOST_ACTION_TGT_DEL:
3275 case IBMVFC_HOST_ACTION_QUERY:
3276 default:
3277 break;
3278 };
3279
3280 return 1;
3281}
3282
3283/**
3284 * ibmvfc_work_to_do - Is there task level work to do?
3285 * @vhost: ibmvfc host struct
3286 *
3287 * Returns:
3288 * 1 if work to do / 0 if not
3289 **/
3290static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3291{
3292 unsigned long flags;
3293 int rc;
3294
3295 spin_lock_irqsave(vhost->host->host_lock, flags);
3296 rc = __ibmvfc_work_to_do(vhost);
3297 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3298 return rc;
3299}
3300
3301/**
3302 * ibmvfc_log_ae - Log async events if necessary
3303 * @vhost: ibmvfc host struct
3304 * @events: events to log
3305 *
3306 **/
3307static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
3308{
3309 if (events & IBMVFC_AE_RSCN)
3310 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
3311 if ((events & IBMVFC_AE_LINKDOWN) &&
3312 vhost->state >= IBMVFC_HALTED)
3313 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
3314 if ((events & IBMVFC_AE_LINKUP) &&
3315 vhost->state == IBMVFC_INITIALIZING)
3316 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
3317}
3318
/**
 * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
 * @tgt:	ibmvfc target struct
 *
 * Registers the target with the FC transport class. fc_remote_port_add()
 * may sleep, so it is called without the host lock; the lock is then
 * taken to publish the rport and clear the target action.
 **/
static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct fc_rport *rport;
	unsigned long flags;

	tgt_dbg(tgt, "Adding rport\n");
	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
	spin_lock_irqsave(vhost->host->host_lock, flags);
	tgt->rport = rport;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	if (rport) {
		tgt_dbg(tgt, "rport add succeeded\n");
		/* Low 12 bits of bb_rcv_sz = buffer-to-buffer receive size */
		rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
		rport->supported_classes = 0;
		/* High bit of word 0 of each class parm block = supported */
		if (tgt->service_parms.class1_parms[0] & 0x80000000)
			rport->supported_classes |= FC_COS_CLASS1;
		if (tgt->service_parms.class2_parms[0] & 0x80000000)
			rport->supported_classes |= FC_COS_CLASS2;
		if (tgt->service_parms.class3_parms[0] & 0x80000000)
			rport->supported_classes |= FC_COS_CLASS3;
	} else
		tgt_dbg(tgt, "rport add failed\n");
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
3349
/**
 * ibmvfc_do_work - Do task level work
 * @vhost:	ibmvfc host struct
 *
 * One step of the host state machine, run from the ibmvfc work thread.
 * Taken with the host lock held; several branches drop the lock to call
 * sleeping functions (rport add/delete, target allocation) and either
 * re-acquire it or return with it released. Any path that returns early
 * must NOT fall through to the final unlock.
 **/
static void ibmvfc_do_work(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;
	unsigned long flags;
	struct fc_rport *rport;

	ibmvfc_log_ae(vhost, vhost->events_to_log);
	spin_lock_irqsave(vhost->host->host_lock, flags);
	vhost->events_to_log = 0;
	switch (vhost->action) {
	case IBMVFC_HOST_ACTION_NONE:
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		break;
	case IBMVFC_HOST_ACTION_INIT:
		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
		/* job_step is the current init stage (e.g. NPIV login) */
		vhost->job_step(vhost);
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		/* Queue a Query Target for every known target */
		list_for_each_entry(tgt, &vhost->targets, queue)
			ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
		break;
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
		/* Kick off at most one pending target job per pass */
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
				tgt->job_step(tgt);
				break;
			}
		}

		if (!ibmvfc_dev_init_to_do(vhost))
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
		break;
	case IBMVFC_HOST_ACTION_TGT_DEL:
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
				tgt_dbg(tgt, "Deleting rport\n");
				rport = tgt->rport;
				tgt->rport = NULL;
				list_del(&tgt->queue);
				/* fc_remote_port_delete can sleep: drop lock,
				 * return, and let the work loop re-enter */
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				if (rport)
					fc_remote_port_delete(rport);
				kref_put(&tgt->kref, ibmvfc_release_tgt);
				return;
			}
		}

		if (vhost->state == IBMVFC_INITIALIZING) {
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
			vhost->job_step = ibmvfc_discover_targets;
		} else {
			/* Steady state reached: resume normal I/O */
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
			spin_unlock_irqrestore(vhost->host->host_lock, flags);
			scsi_unblock_requests(vhost->host);
			wake_up(&vhost->init_wait_q);
			return;
		}
		break;
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
		/* Allocation uses GFP_KERNEL: must not hold the lock */
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		ibmvfc_alloc_targets(vhost);
		spin_lock_irqsave(vhost->host->host_lock, flags);
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		/* Kick off at most one pending target job per pass */
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
				tgt->job_step(tgt);
				break;
			}
		}

		if (!ibmvfc_dev_init_to_do(vhost)) {
			/* All targets initialized: host goes active */
			ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
			vhost->init_retries = 0;
			spin_unlock_irqrestore(vhost->host->host_lock, flags);
			scsi_unblock_requests(vhost->host);
			return;
		}
		break;
	case IBMVFC_HOST_ACTION_TGT_ADD:
		/* Process one rport add or delete per pass (both sleep) */
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				ibmvfc_tgt_add_rport(tgt);
				return;
			} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
				tgt_dbg(tgt, "Deleting rport\n");
				rport = tgt->rport;
				tgt->rport = NULL;
				list_del(&tgt->queue);
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				if (rport)
					fc_remote_port_delete(rport);
				kref_put(&tgt->kref, ibmvfc_release_tgt);
				return;
			}
		}

		if (vhost->reinit) {
			/* Something changed mid-discovery: run query again */
			vhost->reinit = 0;
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
		} else {
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
			wake_up(&vhost->init_wait_q);
		}
		break;
	default:
		break;
	};

	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
3470
3471/**
3472 * ibmvfc_work - Do task level work
3473 * @data: ibmvfc host struct
3474 *
3475 * Returns:
3476 * zero
3477 **/
3478static int ibmvfc_work(void *data)
3479{
3480 struct ibmvfc_host *vhost = data;
3481 int rc;
3482
3483 set_user_nice(current, -20);
3484
3485 while (1) {
3486 rc = wait_event_interruptible(vhost->work_wait_q,
3487 ibmvfc_work_to_do(vhost));
3488
3489 BUG_ON(rc);
3490
3491 if (kthread_should_stop())
3492 break;
3493
3494 ibmvfc_do_work(vhost);
3495 }
3496
3497 ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
3498 return 0;
3499}
3500
/**
 * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
 * @vhost:	ibmvfc host struct
 *
 * Allocates a page for messages, maps it for dma, and registers
 * the crq with the hypervisor.
 *
 * Return value:
 *	zero on success / other on failure
 **/
static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
{
	int rc, retrc = -ENOMEM;
	struct device *dev = vhost->dev;
	struct vio_dev *vdev = to_vio_dev(dev);
	struct ibmvfc_crq_queue *crq = &vhost->crq;

	ENTER;
	crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs,
					PAGE_SIZE, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(crq->msg_token))
		goto map_failed;

	retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
					crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		retrc = rc = ibmvfc_reset_crq(vhost);

	if (rc == H_CLOSED)
		/* Not fatal: partner may come up later; keep the CRQ */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
		goto req_irq_failed;
	}

	if ((rc = vio_enable_interrupts(vdev))) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	LEAVE;
	return retrc;

	/* Unwind in reverse order of setup */
req_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	return retrc;
}
3571
/**
 * ibmvfc_free_mem - Free memory for vhost
 * @vhost:	ibmvfc host struct
 *
 * Tears down everything ibmvfc_alloc_mem() set up, in reverse
 * allocation order.
 *
 * Return value:
 *	none
 **/
static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
{
	struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;

	ENTER;
	mempool_destroy(vhost->tgt_pool);
	kfree(vhost->trace);
	dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
			  vhost->disc_buf_dma);
	dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
			  vhost->login_buf, vhost->login_buf_dma);
	dma_pool_destroy(vhost->sg_pool);
	dma_unmap_single(vhost->dev, async_q->msg_token,
			 async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
	free_page((unsigned long)async_q->msgs);
	LEAVE;
}
3596
/**
 * ibmvfc_alloc_mem - Allocate memory for vhost
 * @vhost:	ibmvfc host struct
 *
 * Allocates, in order: the async CRQ page (DMA mapped), the scatterlist
 * DMA pool, the NPIV login buffer, the discovery buffer, the trace
 * buffer, and the target mempool. Failures unwind via the goto chain in
 * reverse order; ibmvfc_free_mem() is the matching teardown.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
{
	struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
	struct device *dev = vhost->dev;

	ENTER;
	async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
	if (!async_q->msgs) {
		dev_err(dev, "Couldn't allocate async queue.\n");
		goto nomem;
	}

	async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
	async_q->msg_token = dma_map_single(dev, async_q->msgs,
					    async_q->size * sizeof(*async_q->msgs),
					    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(async_q->msg_token)) {
		dev_err(dev, "Failed to map async queue\n");
		goto free_async_crq;
	}

	/* One pool entry = a full SG_ALL descriptor list per command */
	vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
					 SG_ALL * sizeof(struct srp_direct_buf),
					 sizeof(struct srp_direct_buf), 0);

	if (!vhost->sg_pool) {
		dev_err(dev, "Failed to allocate sg pool\n");
		goto unmap_async_crq;
	}

	vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
					      &vhost->login_buf_dma, GFP_KERNEL);

	if (!vhost->login_buf) {
		dev_err(dev, "Couldn't allocate NPIV login buffer\n");
		goto free_sg_pool;
	}

	/* Room for one SCSI ID entry per possible target */
	vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
	vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
					     &vhost->disc_buf_dma, GFP_KERNEL);

	if (!vhost->disc_buf) {
		dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
		goto free_login_buffer;
	}

	vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
			       sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);

	if (!vhost->trace)
		goto free_disc_buffer;

	/* Mempool guarantees target allocation can't fail under pressure */
	vhost->tgt_pool = mempool_create_kzalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
						      sizeof(struct ibmvfc_target));

	if (!vhost->tgt_pool) {
		dev_err(dev, "Couldn't allocate target memory pool\n");
		goto free_trace;
	}

	LEAVE;
	return 0;

free_trace:
	kfree(vhost->trace);
free_disc_buffer:
	dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
			  vhost->disc_buf_dma);
free_login_buffer:
	dma_free_coherent(dev, sizeof(*vhost->login_buf),
			  vhost->login_buf, vhost->login_buf_dma);
free_sg_pool:
	dma_pool_destroy(vhost->sg_pool);
unmap_async_crq:
	dma_unmap_single(dev, async_q->msg_token,
			 async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
free_async_crq:
	free_page((unsigned long)async_q->msgs);
nomem:
	LEAVE;
	return -ENOMEM;
}
3688
/**
 * ibmvfc_probe - Adapter hot plug add entry point
 * @vdev:	vio device struct
 * @id:	vio device id struct
 *
 * Allocates the Scsi_Host and per-host data, brings up the worker
 * thread, CRQ, and event pool, registers with the SCSI midlayer, and
 * starts CRQ initialization. Failures unwind via the goto chain in
 * reverse order of setup.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct ibmvfc_host *vhost;
	struct Scsi_Host *shost;
	struct device *dev = &vdev->dev;
	int rc = -ENOMEM;

	ENTER;
	shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
	if (!shost) {
		dev_err(dev, "Couldn't allocate host data\n");
		goto out;
	}

	shost->transportt = ibmvfc_transport_template;
	shost->can_queue = max_requests;
	shost->max_lun = max_lun;
	shost->max_id = max_targets;
	shost->max_sectors = IBMVFC_MAX_SECTORS;
	shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
	shost->unique_id = shost->host_no;

	/* vhost lives in the hostdata of the Scsi_Host */
	vhost = shost_priv(shost);
	INIT_LIST_HEAD(&vhost->sent);
	INIT_LIST_HEAD(&vhost->free);
	INIT_LIST_HEAD(&vhost->targets);
	sprintf(vhost->name, IBMVFC_NAME);
	vhost->host = shost;
	vhost->dev = dev;
	vhost->partition_number = -1;
	vhost->log_level = log_level;
	strcpy(vhost->partition_name, "UNKNOWN");
	init_waitqueue_head(&vhost->work_wait_q);
	init_waitqueue_head(&vhost->init_wait_q);

	if ((rc = ibmvfc_alloc_mem(vhost)))
		goto free_scsi_host;

	vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
					 shost->host_no);

	if (IS_ERR(vhost->work_thread)) {
		dev_err(dev, "Couldn't create kernel thread: %ld\n",
			PTR_ERR(vhost->work_thread));
		/* NOTE(review): rc still holds -ENOMEM here, not the
		 * PTR_ERR value — callers see a generic failure */
		goto free_host_mem;
	}

	if ((rc = ibmvfc_init_crq(vhost))) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto kill_kthread;
	}

	if ((rc = ibmvfc_init_event_pool(vhost))) {
		dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
		goto release_crq;
	}

	if ((rc = scsi_add_host(shost, dev)))
		goto release_event_pool;

	if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
					   &ibmvfc_trace_attr))) {
		dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
		goto remove_shost;
	}

	dev_set_drvdata(dev, vhost);
	spin_lock(&ibmvfc_driver_lock);
	list_add_tail(&vhost->queue, &ibmvfc_head);
	spin_unlock(&ibmvfc_driver_lock);

	/* Kick off CRQ handshake with the partner; discovery follows */
	ibmvfc_send_crq_init(vhost);
	scsi_scan_host(shost);
	return 0;

remove_shost:
	scsi_remove_host(shost);
release_event_pool:
	ibmvfc_free_event_pool(vhost);
release_crq:
	ibmvfc_release_crq_queue(vhost);
kill_kthread:
	kthread_stop(vhost->work_thread);
free_host_mem:
	ibmvfc_free_mem(vhost);
free_scsi_host:
	scsi_host_put(shost);
out:
	LEAVE;
	return rc;
}
3788
/**
 * ibmvfc_remove - Adapter hot plug remove entry point
 * @vdev:	vio device struct
 *
 * Tears down the adapter in reverse order of probe: trace file, worker
 * thread, transport/SCSI host registration, CRQ, outstanding requests
 * and event pool (under the host lock), then host memory.
 *
 * Return value:
 *	0
 **/
static int ibmvfc_remove(struct vio_dev *vdev)
{
	struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
	unsigned long flags;

	ENTER;
	ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
	kthread_stop(vhost->work_thread);
	fc_remove_host(vhost->host);
	scsi_remove_host(vhost->host);
	ibmvfc_release_crq_queue(vhost);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* Fail any commands still in flight before freeing their events */
	ibmvfc_purge_requests(vhost, DID_ERROR);
	ibmvfc_free_event_pool(vhost);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_free_mem(vhost);
	spin_lock(&ibmvfc_driver_lock);
	list_del(&vhost->queue);
	spin_unlock(&ibmvfc_driver_lock);
	scsi_host_put(vhost->host);
	LEAVE;
	return 0;
}
3821
/* VIO device IDs this driver binds to ("fcp" nodes of type IBM,vfc-client) */
static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
	{"fcp", "IBM,vfc-client"},
	{ "", "" }	/* terminator */
};
MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
3827
/* VIO bus driver glue: hot plug add/remove entry points */
static struct vio_driver ibmvfc_driver = {
	.id_table = ibmvfc_device_table,
	.probe = ibmvfc_probe,
	.remove = ibmvfc_remove,
	.driver = {
		.name = IBMVFC_NAME,
		.owner = THIS_MODULE,
	}
};
3837
/*
 * FC transport class callbacks and attribute visibility. show_* = 1
 * exposes the attribute via sysfs; get_* callbacks refresh it on read.
 */
static struct fc_function_template ibmvfc_transport_functions = {
	.show_host_fabric_name = 1,
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_port_type = 1,
	.show_host_port_id = 1,

	.get_host_port_state = ibmvfc_get_host_port_state,
	.show_host_port_state = 1,

	.get_host_speed = ibmvfc_get_host_speed,
	.show_host_speed = 1,

	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
	.terminate_rport_io = ibmvfc_terminate_rport_io,

	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.get_starget_node_name = ibmvfc_get_starget_node_name,
	.show_starget_node_name = 1,

	.get_starget_port_name = ibmvfc_get_starget_port_name,
	.show_starget_port_name = 1,

	.get_starget_port_id = ibmvfc_get_starget_port_id,
	.show_starget_port_id = 1,
};
3870
/**
 * ibmvfc_module_init - Initialize the ibmvfc module
 *
 * Attaches the FC transport template and registers the VIO driver;
 * releases the transport again if driver registration fails. Bails out
 * early on systems without VIO firmware support.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int __init ibmvfc_module_init(void)
{
	int rc;

	if (!firmware_has_feature(FW_FEATURE_VIO))
		return -ENODEV;

	printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
	       IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);

	ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
	if (!ibmvfc_transport_template)
		return -ENOMEM;

	rc = vio_register_driver(&ibmvfc_driver);
	if (rc)
		fc_release_transport(ibmvfc_transport_template);
	return rc;
}
3896
/**
 * ibmvfc_module_exit - Teardown the ibmvfc module
 *
 * Unregisters the VIO driver (which removes all bound adapters) and
 * releases the FC transport template — reverse of module init.
 *
 * Return value:
 *	nothing
 **/
static void __exit ibmvfc_module_exit(void)
{
	vio_unregister_driver(&ibmvfc_driver);
	fc_release_transport(ibmvfc_transport_template);
}

module_init(ibmvfc_module_init);
module_exit(ibmvfc_module_exit);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
new file mode 100644
index 000000000000..057f3c01ed61
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -0,0 +1,682 @@
1/*
2 * ibmvfc.h -- driver for IBM Power Virtual Fibre Channel Adapter
3 *
4 * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) IBM Corporation, 2008
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#ifndef _IBMVFC_H
25#define _IBMVFC_H
26
27#include <linux/list.h>
28#include <linux/types.h>
29#include "viosrp.h"
30
#define IBMVFC_NAME	"ibmvfc"
#define IBMVFC_DRIVER_VERSION		"1.0.0"
#define IBMVFC_DRIVER_DATE		"(July 1, 2008)"

/* Command/init timeouts in seconds */
#define IBMVFC_DEFAULT_TIMEOUT	15
#define IBMVFC_INIT_TIMEOUT		30
#define IBMVFC_MAX_REQUESTS_DEFAULT	100

#define IBMVFC_DEBUG			0
#define IBMVFC_MAX_TARGETS		1024
#define IBMVFC_MAX_LUN			0xffffffff
#define IBMVFC_MAX_SECTORS		0xffffu
#define IBMVFC_MAX_DISC_THREADS	4
#define IBMVFC_TGT_MEMPOOL_SZ		64
#define IBMVFC_MAX_CMDS_PER_LUN	64
#define IBMVFC_MAX_INIT_RETRIES	3
/* Default rport dev_loss_tmo: 5 minutes */
#define IBMVFC_DEV_LOSS_TMO		(5 * 60)
#define IBMVFC_DEFAULT_LOG_LEVEL	2
#define IBMVFC_MAX_CDB_LEN		16

/*
 * Ensure we have resources for ERP and initialization:
 * 1 for ERP
 * 1 for initialization
 * 1 for each discovery thread
 */
#define IBMVFC_NUM_INTERNAL_REQ	(1 + 1 + disc_threads)

/* MAD completion status codes returned by the VIOS in common.status */
#define IBMVFC_MAD_SUCCESS		0x00
#define IBMVFC_MAD_NOT_SUPPORTED	0xF1
#define IBMVFC_MAD_FAILED		0xF7
#define IBMVFC_MAD_DRIVER_FAILED	0xEE
#define IBMVFC_MAD_CRQ_ERROR		0xEF
64
/* Value of the first byte of a CRQ entry: what kind of message it is */
enum ibmvfc_crq_valid {
	IBMVFC_CRQ_CMD_RSP		= 0x80,
	IBMVFC_CRQ_INIT_RSP		= 0xC0,
	IBMVFC_CRQ_XPORT_EVENT		= 0xFF,
};

/* Format byte of init/transport CRQ messages */
enum ibmvfc_crq_format {
	IBMVFC_CRQ_INIT			= 0x01,
	IBMVFC_CRQ_INIT_COMPLETE	= 0x02,
	IBMVFC_PARTITION_MIGRATED	= 0x06,
};

/* Bit flags qualifying a command completion status */
enum ibmvfc_cmd_status_flags {
	IBMVFC_FABRIC_MAPPED		= 0x0001,
	IBMVFC_VIOS_FAILURE		= 0x0002,
	IBMVFC_FC_FAILURE		= 0x0004,
	IBMVFC_FC_SCSI_ERROR		= 0x0008,
	IBMVFC_HW_EVENT_LOGGED		= 0x0010,
	IBMVFC_VIOS_LOGGED		= 0x0020,
};

/* Error codes valid when IBMVFC_FABRIC_MAPPED is set */
enum ibmvfc_fabric_mapped_errors {
	IBMVFC_UNABLE_TO_ESTABLISH	= 0x0001,
	IBMVFC_XPORT_FAULT		= 0x0002,
	IBMVFC_CMD_TIMEOUT		= 0x0003,
	IBMVFC_ENETDOWN			= 0x0004,
	IBMVFC_HW_FAILURE		= 0x0005,
	IBMVFC_LINK_DOWN_ERR		= 0x0006,
	IBMVFC_LINK_DEAD_ERR		= 0x0007,
	IBMVFC_UNABLE_TO_REGISTER	= 0x0008,
	IBMVFC_XPORT_BUSY		= 0x000A,
	IBMVFC_XPORT_DEAD		= 0x000B,
	IBMVFC_CONFIG_ERROR		= 0x000C,
	IBMVFC_NAME_SERVER_FAIL		= 0x000D,
	IBMVFC_LINK_HALTED		= 0x000E,
	IBMVFC_XPORT_GENERAL		= 0x8000,
};

/* Error codes valid when IBMVFC_VIOS_FAILURE is set */
enum ibmvfc_vios_errors {
	IBMVFC_CRQ_FAILURE			= 0x0001,
	IBMVFC_SW_FAILURE			= 0x0002,
	IBMVFC_INVALID_PARAMETER		= 0x0003,
	IBMVFC_MISSING_PARAMETER		= 0x0004,
	IBMVFC_HOST_IO_BUS			= 0x0005,
	IBMVFC_TRANS_CANCELLED			= 0x0006,
	IBMVFC_TRANS_CANCELLED_IMPLICIT	= 0x0007,
	IBMVFC_INSUFFICIENT_RESOURCE		= 0x0008,
	IBMVFC_COMMAND_FAILED			= 0x8000,
};

/* MAD opcodes (common.opcode) */
enum ibmvfc_mad_types {
	IBMVFC_NPIV_LOGIN		= 0x0001,
	IBMVFC_DISC_TARGETS	= 0x0002,
	IBMVFC_PORT_LOGIN		= 0x0004,
	IBMVFC_PROCESS_LOGIN	= 0x0008,
	IBMVFC_QUERY_TARGET	= 0x0010,
	IBMVFC_IMPLICIT_LOGOUT	= 0x0040,
	IBMVFC_TMF_MAD		= 0x0100,
};
124
/*
 * Wire structures exchanged with the VIOS. Layouts (packed, aligned)
 * presumably match the firmware interface — do not reorder or rename
 * fields.
 */

/* Header common to every MAD request/response */
struct ibmvfc_mad_common {
	u32 version;
	u32 reserved;
	u32 opcode;		/* enum ibmvfc_mad_types */
	u16 status;		/* IBMVFC_MAD_* on completion */
	u16 length;
	u64 tag;
}__attribute__((packed, aligned (8)));

/* NPIV login MAD: payload is indirect via the login DMA buffer */
struct ibmvfc_npiv_login_mad {
	struct ibmvfc_mad_common common;
	struct srp_direct_buf buffer;
}__attribute__((packed, aligned (8)));

#define IBMVFC_MAX_NAME 256

/* NPIV login request body (copied into the login buffer) */
struct ibmvfc_npiv_login {
	u32 ostype;
#define IBMVFC_OS_LINUX	0x02
	u32 pad;
	u64 max_dma_len;
	u32 max_payload;
	u32 max_response;
	u32 partition_num;
	u32 vfc_frame_version;
	u16 fcp_version;
	u16 flags;
#define IBMVFC_CLIENT_MIGRATED	0x01
#define IBMVFC_FLUSH_ON_HALT	0x02
	u32 max_cmds;
	u64 capabilities;
#define IBMVFC_CAN_MIGRATE		0x01
	u64 node_name;
	struct srp_direct_buf async;	/* async CRQ location */
	u8 partition_name[IBMVFC_MAX_NAME];
	u8 device_name[IBMVFC_MAX_NAME];
	u8 drc_name[IBMVFC_MAX_NAME];
	u64 reserved2[2];
}__attribute__((packed, aligned (8)));

/* FC-PH common service parameters */
struct ibmvfc_common_svc_parms {
	u16 fcph_version;
	u16 b2b_credit;
	u16 features;
	u16 bb_rcv_sz; /* upper nibble is BB_SC_N */
	u32 ratov;
	u32 edtov;
}__attribute__((packed, aligned (4)));

/* FC-PH service parameters (per class of service) */
struct ibmvfc_service_parms {
	struct ibmvfc_common_svc_parms common;
	u8 port_name[8];
	u8 node_name[8];
	u32 class1_parms[4];
	u32 class2_parms[4];
	u32 class3_parms[4];
	u32 obsolete[4];
	u32 vendor_version[4];
	u32 services_avail[2];
	u32 ext_len;
	u32 reserved[30];
	u32 clk_sync_qos[2];
}__attribute__((packed, aligned (4)));

/* NPIV login response (written by the VIOS into the login buffer) */
struct ibmvfc_npiv_login_resp {
	u32 version;
	u16 status;
	u16 error;
	u32 flags;
#define IBMVFC_NATIVE_FC		0x01
#define IBMVFC_CAN_FLUSH_ON_HALT	0x08
	u32 reserved;
	u64 capabilites;	/* sic: misspelling is part of the interface */
	u32 max_cmds;
	u32 scsi_id_sz;
	u64 max_dma_len;
	u64 scsi_id;
	u64 port_name;
	u64 node_name;
	u64 link_speed;
	u8 partition_name[IBMVFC_MAX_NAME];
	u8 device_name[IBMVFC_MAX_NAME];
	u8 port_loc_code[IBMVFC_MAX_NAME];
	u8 drc_name[IBMVFC_MAX_NAME];
	struct ibmvfc_service_parms service_parms;
	u64 reserved2;
}__attribute__((packed, aligned (8)));

/* The login buffer holds the request going out, the response coming back */
union ibmvfc_npiv_login_data {
	struct ibmvfc_npiv_login login;
	struct ibmvfc_npiv_login_resp resp;
}__attribute__((packed, aligned (8)));
217
/*
 * Discovery buffer: variable-length array of SCSI IDs written by the
 * VIOS; declared [1] as a pre-C99 variable-length tail. Only the low
 * 24 bits of each entry are the SCSI ID.
 */
struct ibmvfc_discover_targets_buf {
	u32 scsi_id[1];
#define IBMVFC_DISC_TGT_SCSI_ID_MASK	0x00ffffff
};

/* Discover Targets MAD: asks the VIOS to fill the discovery buffer */
struct ibmvfc_discover_targets {
	struct ibmvfc_mad_common common;
	struct srp_direct_buf buffer;
	u32 flags;
	u16 status;
	u16 error;
	u32 bufflen;
	u32 num_avail;		/* targets known to the VIOS */
	u32 num_written;	/* entries actually written to the buffer */
	u64 reserved[2];
}__attribute__((packed, aligned (8)));
234
235enum ibmvfc_fc_reason {
236 IBMVFC_INVALID_ELS_CMD_CODE = 0x01,
237 IBMVFC_INVALID_VERSION = 0x02,
238 IBMVFC_LOGICAL_ERROR = 0x03,
239 IBMVFC_INVALID_CT_IU_SIZE = 0x04,
240 IBMVFC_LOGICAL_BUSY = 0x05,
241 IBMVFC_PROTOCOL_ERROR = 0x07,
242 IBMVFC_UNABLE_TO_PERFORM_REQ = 0x09,
243 IBMVFC_CMD_NOT_SUPPORTED = 0x0B,
244 IBMVFC_SERVER_NOT_AVAIL = 0x0D,
245 IBMVFC_CMD_IN_PROGRESS = 0x0E,
246 IBMVFC_VENDOR_SPECIFIC = 0xFF,
247};
248
249enum ibmvfc_fc_type {
250 IBMVFC_FABRIC_REJECT = 0x01,
251 IBMVFC_PORT_REJECT = 0x02,
252 IBMVFC_LS_REJECT = 0x03,
253 IBMVFC_FABRIC_BUSY = 0x04,
254 IBMVFC_PORT_BUSY = 0x05,
255 IBMVFC_BASIC_REJECT = 0x06,
256};
257
258enum ibmvfc_gs_explain {
259 IBMVFC_PORT_NAME_NOT_REG = 0x02,
260};
261
/* Port Login (PLOGI) MAD: logs in to a remote port identified by scsi_id. */
struct ibmvfc_port_login {
	struct ibmvfc_mad_common common;
	u64 scsi_id;		/* remote port to log in to */
	u16 reserved;
	u16 fc_service_class;
	u32 blksz;
	u32 hdr_per_blk;
	u16 status;
	u16 error;		/* also fc_reason */
	u16 fc_explain;
	u16 fc_type;
	u32 reserved2;
	struct ibmvfc_service_parms service_parms;
	struct ibmvfc_service_parms service_parms_change;
	u64 reserved3[2];
}__attribute__((packed, aligned (8)));

/* PRLI service parameter page (FCP process login payload). */
struct ibmvfc_prli_svc_parms {
	u8 type;
#define IBMVFC_SCSI_FCP_TYPE		0x08
	u8 type_ext;
	u16 flags;
#define IBMVFC_PRLI_ORIG_PA_VALID	0x8000
#define IBMVFC_PRLI_RESP_PA_VALID	0x4000
#define IBMVFC_PRLI_EST_IMG_PAIR	0x2000
	u32 orig_pa;		/* originator process associator */
	u32 resp_pa;		/* responder process associator */
	u32 service_parms;
#define IBMVFC_PRLI_TASK_RETRY		0x00000200
#define IBMVFC_PRLI_RETRY		0x00000100
#define IBMVFC_PRLI_DATA_OVERLAY	0x00000040
#define IBMVFC_PRLI_INITIATOR_FUNC	0x00000020
#define IBMVFC_PRLI_TARGET_FUNC		0x00000010
#define IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED	0x00000002
#define IBMVFC_PRLI_WR_FCP_XFER_RDY_DISABLED	0x00000001
}__attribute__((packed, aligned (4)));

/* Process Login (PRLI) MAD: establishes the FCP image pair with a target. */
struct ibmvfc_process_login {
	struct ibmvfc_mad_common common;
	u64 scsi_id;
	struct ibmvfc_prli_svc_parms parms;
	u8 reserved[48];
	u16 status;
	u16 error;		/* also fc_reason */
	u32 reserved2;
	u64 reserved3[2];
}__attribute__((packed, aligned (8)));

/* Query Target MAD: resolves a WWPN to a SCSI ID (or reports why not). */
struct ibmvfc_query_tgt {
	struct ibmvfc_mad_common common;
	u64 wwpn;		/* input: port name to look up */
	u64 scsi_id;		/* output: resolved SCSI ID */
	u16 status;
	u16 error;
	u16 fc_explain;
	u16 fc_type;
	u64 reserved[2];
}__attribute__((packed, aligned (8)));
320
/* Implicit Logout MAD: tears down a previous login for old_scsi_id. */
struct ibmvfc_implicit_logout {
	struct ibmvfc_mad_common common;
	u64 old_scsi_id;
	u64 reserved[2];
}__attribute__((packed, aligned (8)));

/* Task Management Function MAD (abort/reset/cancel). */
struct ibmvfc_tmf {
	struct ibmvfc_mad_common common;
	u64 scsi_id;		/* target of the TMF */
	struct scsi_lun lun;
	u32 flags;		/* which TMF to perform */
#define IBMVFC_TMF_ABORT_TASK		0x02
#define IBMVFC_TMF_ABORT_TASK_SET	0x04
#define IBMVFC_TMF_LUN_RESET		0x10
#define IBMVFC_TMF_TGT_RESET		0x20
#define IBMVFC_TMF_LUA_VALID		0x40
	u32 cancel_key;		/* key identifying the commands to cancel */
	u32 my_cancel_key;
#define IBMVFC_TMF_CANCEL_KEY		0x80000000
	u32 pad;
	u64 reserved[2];
}__attribute__((packed, aligned (8)));

/* FCP response-info codes (rsp_code in struct ibmvfc_fcp_rsp_info). */
enum ibmvfc_fcp_rsp_info_codes {
	RSP_NO_FAILURE		= 0x00,
	RSP_TMF_REJECTED	= 0x04,
	RSP_TMF_FAILED		= 0x05,
	RSP_TMF_INVALID_LUN	= 0x09,
};
350
/* FCP_RSP_INFO field: carries the TMF response code. */
struct ibmvfc_fcp_rsp_info {
	u16 reserved;
	u8 rsp_code;		/* one of enum ibmvfc_fcp_rsp_info_codes */
	u8 reserved2[4];
}__attribute__((packed, aligned (2)));

/* Flag bits in the FCP response 'flags' byte. */
enum ibmvfc_fcp_rsp_flags {
	FCP_BIDI_RSP			= 0x80,
	FCP_BIDI_READ_RESID_UNDER	= 0x40,
	FCP_BIDI_READ_RESID_OVER	= 0x20,
	FCP_CONF_REQ			= 0x10,
	FCP_RESID_UNDER			= 0x08,
	FCP_RESID_OVER			= 0x04,
	FCP_SNS_LEN_VALID		= 0x02,	/* sense data present */
	FCP_RSP_LEN_VALID		= 0x01,	/* response info present */
};

/* Trailing FCP response data: response info and/or sense bytes. */
union ibmvfc_fcp_rsp_data {
	struct ibmvfc_fcp_rsp_info info;
	u8 sense[SCSI_SENSE_BUFFERSIZE + sizeof(struct ibmvfc_fcp_rsp_info)];
}__attribute__((packed, aligned (8)));

/* FCP response IU as returned by the server for a SCSI command. */
struct ibmvfc_fcp_rsp {
	u64 reserved;
	u16 retry_delay_timer;
	u8 flags;		/* enum ibmvfc_fcp_rsp_flags bits */
	u8 scsi_status;		/* SAM status byte */
	u32 fcp_resid;		/* residual count; valid per FCP_RESID_* flags */
	u32 fcp_sense_len;	/* valid when FCP_SNS_LEN_VALID set */
	u32 fcp_rsp_len;	/* valid when FCP_RSP_LEN_VALID set */
	union ibmvfc_fcp_rsp_data data;
}__attribute__((packed, aligned (8)));

/* Flag bits for the 'flags' field of struct ibmvfc_cmd. */
enum ibmvfc_cmd_flags {
	IBMVFC_SCATTERLIST	= 0x0001,
	IBMVFC_NO_MEM_DESC	= 0x0002,
	IBMVFC_READ		= 0x0004,
	IBMVFC_WRITE		= 0x0008,
	IBMVFC_TMF		= 0x0080,
	IBMVFC_CLASS_3_ERR	= 0x0100,
};

/* FCP task attributes (pri_task_attr in the command IU). */
enum ibmvfc_fc_task_attr {
	IBMVFC_SIMPLE_TASK	= 0x00,
	IBMVFC_HEAD_OF_QUEUE	= 0x01,
	IBMVFC_ORDERED_TASK	= 0x02,
	IBMVFC_ACA_TASK		= 0x04,
};

/* TMF flag bits carried inside the FCP command IU (tmf_flags). */
enum ibmvfc_fc_tmf_flags {
	IBMVFC_ABORT_TASK_SET	= 0x02,
	IBMVFC_LUN_RESET	= 0x10,
	IBMVFC_TARGET_RESET	= 0x20,
};
405
/* FCP command IU embedded in struct ibmvfc_cmd. */
struct ibmvfc_fcp_cmd_iu {
	struct scsi_lun lun;
	u8 crn;			/* command reference number */
	u8 pri_task_attr;	/* enum ibmvfc_fc_task_attr */
	u8 tmf_flags;		/* enum ibmvfc_fc_tmf_flags */
	u8 add_cdb_len;		/* low bits carry the data-direction flags below */
#define IBMVFC_RDDATA		0x02
#define IBMVFC_WRDATA		0x01
	u8 cdb[IBMVFC_MAX_CDB_LEN];
	u32 xfer_len;		/* expected transfer length in bytes */
}__attribute__((packed, aligned (4)));

/*
 * SCSI command frame sent over the CRQ.  Contains the FCP command IU and
 * space for the server to write the FCP response.  Wire format.
 */
struct ibmvfc_cmd {
	u64 task_tag;
	u32 frame_type;
	u32 payload_len;
	u32 resp_len;
	u32 adapter_resid;	/* valid when IBMVFC_ADAPTER_RESID_VALID set */
	u16 status;
	u16 error;
	u16 flags;		/* enum ibmvfc_cmd_flags bits */
	u16 response_flags;
#define IBMVFC_ADAPTER_RESID_VALID	0x01
	u32 cancel_key;		/* matched by TMF cancel requests */
	u32 exchange_id;
	struct srp_direct_buf ext_func;
	struct srp_direct_buf ioba;	/* data buffer (or indirect descriptor list) */
	struct srp_direct_buf resp;	/* where the server DMAs the response */
	u64 correlation;	/* driver cookie echoed back in the response */
	u64 tgt_scsi_id;
	u64 tag;
	u64 reserved3[2];
	struct ibmvfc_fcp_cmd_iu iu;
	struct ibmvfc_fcp_rsp rsp;
}__attribute__((packed, aligned (8)));
441
/* Trace payload recorded when a command is started. */
struct ibmvfc_trace_start_entry {
	u32 xfer_len;
}__attribute__((packed));

/* Trace payload recorded when a command completes. */
struct ibmvfc_trace_end_entry {
	u16 status;
	u16 error;
	u8 fcp_rsp_flags;
	u8 rsp_code;
	u8 scsi_status;
	u8 reserved;
}__attribute__((packed));

/* One slot in the driver's in-memory command trace ring. */
struct ibmvfc_trace_entry {
	struct ibmvfc_event *evt;	/* event pointer at trace time — may be stale when read */
	u32 time;
	u32 scsi_id;
	u32 lun;
	u8 fmt;
	u8 op_code;
	u8 tmf_flags;
	u8 type;		/* IBMVFC_TRC_START or IBMVFC_TRC_END */
#define IBMVFC_TRC_START	0x00
#define IBMVFC_TRC_END		0xff
	union {
		struct ibmvfc_trace_start_entry start;
		struct ibmvfc_trace_end_entry end;
	} u;
}__attribute__((packed, aligned (8)));

/* CRQ message format discriminator (struct ibmvfc_crq.format). */
enum ibmvfc_crq_formats {
	IBMVFC_CMD_FORMAT	= 0x01,
	IBMVFC_ASYNC_EVENT	= 0x02,
	IBMVFC_MAD_FORMAT	= 0x04,
};
477
/* Asynchronous event codes delivered on the async CRQ. */
enum ibmvfc_async_event {
	IBMVFC_AE_ELS_PLOGI		= 0x0001,
	IBMVFC_AE_ELS_LOGO		= 0x0002,
	IBMVFC_AE_ELS_PRLO		= 0x0004,
	IBMVFC_AE_SCN_NPORT		= 0x0008,
	IBMVFC_AE_SCN_GROUP		= 0x0010,
	IBMVFC_AE_SCN_DOMAIN		= 0x0020,
	IBMVFC_AE_SCN_FABRIC		= 0x0040,
	IBMVFC_AE_LINK_UP		= 0x0080,
	IBMVFC_AE_LINK_DOWN		= 0x0100,
	IBMVFC_AE_LINK_DEAD		= 0x0200,
	IBMVFC_AE_HALT			= 0x0400,
	IBMVFC_AE_RESUME		= 0x0800,
	IBMVFC_AE_ADAPTER_FAILED	= 0x1000,
};

/* One CRQ element: valid/format header plus the I/O bus address of the frame. */
struct ibmvfc_crq {
	u8 valid;
	u8 format;		/* enum ibmvfc_crq_formats */
	u8 reserved[6];
	u64 ioba;		/* I/O bus address of the message payload */
}__attribute__((packed, aligned (8)));

/* Driver-side state for the main CRQ ring. */
struct ibmvfc_crq_queue {
	struct ibmvfc_crq *msgs;	/* ring of CRQ elements */
	int size, cur;			/* ring size and current index */
	dma_addr_t msg_token;		/* DMA address of the ring */
};

/* One element of the asynchronous event queue. */
struct ibmvfc_async_crq {
	u8 valid;
	u8 pad[3];
	u32 pad2;
	u64 event;		/* enum ibmvfc_async_event */
	u64 scsi_id;
	u64 wwpn;
	u64 node_name;
	u64 reserved;
}__attribute__((packed, aligned (8)));

/* Driver-side state for the async event ring. */
struct ibmvfc_async_crq_queue {
	struct ibmvfc_async_crq *msgs;
	int size, cur;
	dma_addr_t msg_token;
};
523
/*
 * Union of every information unit the driver sends/receives; sized to the
 * largest member so one DMA slot can hold any request type.
 */
union ibmvfc_iu {
	struct ibmvfc_mad_common mad_common;
	struct ibmvfc_npiv_login_mad npiv_login;
	struct ibmvfc_discover_targets discover_targets;
	struct ibmvfc_port_login plogi;
	struct ibmvfc_process_login prli;
	struct ibmvfc_query_tgt query_tgt;
	struct ibmvfc_implicit_logout implicit_logout;
	struct ibmvfc_tmf tmf;
	struct ibmvfc_cmd cmd;
}__attribute__((packed, aligned (8)));

/* Per-target state machine states (driver-internal, not wire format). */
enum ibmvfc_target_action {
	IBMVFC_TGT_ACTION_NONE = 0,
	IBMVFC_TGT_ACTION_INIT,
	IBMVFC_TGT_ACTION_INIT_WAIT,
	IBMVFC_TGT_ACTION_ADD_RPORT,
	IBMVFC_TGT_ACTION_DEL_RPORT,
};
543
/* Driver-internal representation of one remote target port. */
struct ibmvfc_target {
	struct list_head queue;		/* entry on vhost->targets */
	struct ibmvfc_host *vhost;
	u64 scsi_id;
	u64 new_scsi_id;
	struct fc_rport *rport;		/* FC transport rport, once added */
	int target_id;
	enum ibmvfc_target_action action;
	int need_login;
	int init_retries;
	struct ibmvfc_service_parms service_parms;
	struct ibmvfc_service_parms service_parms_change;
	struct fc_rport_identifiers ids;
	void (*job_step) (struct ibmvfc_target *);	/* next step of the login state machine */
	struct kref kref;		/* refcount; target freed on last put */
};

/* a unit of work for the hosting partition */
struct ibmvfc_event {
	struct list_head queue;		/* entry on vhost->sent or vhost->free */
	struct ibmvfc_host *vhost;
	struct ibmvfc_target *tgt;
	struct scsi_cmnd *cmnd;		/* associated SCSI command, if any */
	atomic_t free;			/* nonzero while the event sits on the free list */
	union ibmvfc_iu *xfer_iu;	/* DMA-able IU from the event pool */
	void (*done) (struct ibmvfc_event *);	/* completion callback */
	struct ibmvfc_crq crq;
	union ibmvfc_iu iu;		/* host-side copy of the request IU */
	union ibmvfc_iu *sync_iu;	/* where to copy the response for sync callers */
	struct srp_direct_buf *ext_list;	/* indirect SG descriptor list */
	dma_addr_t ext_list_token;
	struct completion comp;		/* signalled for synchronous requests */
	struct timer_list timer;	/* request timeout */
};

/* a pool of event structs for use */
struct ibmvfc_event_pool {
	struct ibmvfc_event *events;
	u32 size;			/* number of events in the pool */
	union ibmvfc_iu *iu_storage;	/* one DMA IU slot per event */
	dma_addr_t iu_token;
};
586
/* Work items for the host state machine, processed by the work thread. */
enum ibmvfc_host_action {
	IBMVFC_HOST_ACTION_NONE = 0,
	IBMVFC_HOST_ACTION_INIT,
	IBMVFC_HOST_ACTION_INIT_WAIT,
	IBMVFC_HOST_ACTION_QUERY,
	IBMVFC_HOST_ACTION_QUERY_TGTS,
	IBMVFC_HOST_ACTION_TGT_DEL,
	IBMVFC_HOST_ACTION_ALLOC_TGTS,
	IBMVFC_HOST_ACTION_TGT_INIT,
	IBMVFC_HOST_ACTION_TGT_ADD,
};

/* Overall adapter/link state. */
enum ibmvfc_host_state {
	IBMVFC_NO_CRQ = 0,
	IBMVFC_INITIALIZING,
	IBMVFC_ACTIVE,
	IBMVFC_HALTED,
	IBMVFC_LINK_DOWN,
	IBMVFC_LINK_DEAD,
	IBMVFC_HOST_OFFLINE,
};
608
/* Per-adapter state for one virtual FC host. */
struct ibmvfc_host {
	char name[8];
	struct list_head queue;
	struct Scsi_Host *host;
	enum ibmvfc_host_state state;
	enum ibmvfc_host_action action;
#define IBMVFC_NUM_TRACE_INDEX_BITS		8
#define IBMVFC_NUM_TRACE_ENTRIES	(1 << IBMVFC_NUM_TRACE_INDEX_BITS)
#define IBMVFC_TRACE_SIZE	(sizeof(struct ibmvfc_trace_entry) * IBMVFC_NUM_TRACE_ENTRIES)
	struct ibmvfc_trace_entry *trace;	/* command trace ring */
	u32 trace_index:IBMVFC_NUM_TRACE_INDEX_BITS;	/* bitfield width makes the index wrap at ring size */
	int num_targets;
	struct list_head targets;	/* list of struct ibmvfc_target */
	struct list_head sent;		/* events outstanding to the server */
	struct list_head free;		/* available events */
	struct device *dev;
	struct ibmvfc_event_pool pool;
	struct dma_pool *sg_pool;	/* pool for indirect SG descriptor lists */
	mempool_t *tgt_pool;		/* allocation pool for targets */
	struct ibmvfc_crq_queue crq;
	struct ibmvfc_async_crq_queue async_crq;
	struct ibmvfc_npiv_login login_info;
	union ibmvfc_npiv_login_data *login_buf;	/* DMA buffer for NPIV login */
	dma_addr_t login_buf_dma;
	int disc_buf_sz;
	int log_level;			/* threshold used by ibmvfc_log() */
	struct ibmvfc_discover_targets_buf *disc_buf;
	int task_set;
	int init_retries;
	int discovery_threads;
	int client_migrated;
	int reinit;
	int events_to_log;		/* bitmask of the IBMVFC_AE_* bits below */
#define IBMVFC_AE_LINKUP	0x0001
#define IBMVFC_AE_LINKDOWN	0x0002
#define IBMVFC_AE_RSCN		0x0004
	dma_addr_t disc_buf_dma;
	unsigned int partition_number;
	char partition_name[97];
	void (*job_step) (struct ibmvfc_host *);	/* next step of the host state machine */
	struct task_struct *work_thread;
	wait_queue_head_t init_wait_q;
	wait_queue_head_t work_wait_q;	/* woken when the work thread has something to do */
};
653
/* Execute CMD only when the module's debug flag is enabled. */
#define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0)

/*
 * Per-target logging helpers; prefix messages with the target's SCSI ID.
 * scsi_id is u64 — %lX assumes u64 == unsigned long (true on ppc64, the
 * only platform this driver runs on).
 */
#define tgt_dbg(t, fmt, ...)			\
	DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__))

#define tgt_err(t, fmt, ...)		\
	dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)

/* Per-host debug logging, gated on ibmvfc_debug. */
#define ibmvfc_dbg(vhost, ...) \
	DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))

/* Log at dev_err level only when 'level' meets the host's log_level threshold. */
#define ibmvfc_log(vhost, level, ...) \
	do { \
		if (level >= (vhost)->log_level) \
			dev_err((vhost)->dev, ##__VA_ARGS__); \
	} while (0)

/*
 * Function entry/exit tracing.  Use C99 __func__ rather than the gcc-specific
 * __FUNCTION__ extension, matching the tree-wide kernel conversion.
 */
#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __func__))
#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __func__))

#ifdef CONFIG_SCSI_IBMVFC_TRACE
#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
#define ibmvfc_remove_trace_file(kobj, attr) sysfs_remove_bin_file(kobj, attr)
#else
#define ibmvfc_create_trace_file(kobj, attr) 0
#define ibmvfc_remove_trace_file(kobj, attr) do { } while (0)
#endif
681
682#endif
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 683bce375c74..f843c1383a4b 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -258,19 +258,6 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
258 return ide_stopped; 258 return ide_stopped;
259} 259}
260 260
261static ide_startstop_t
262idescsi_atapi_abort(ide_drive_t *drive, struct request *rq)
263{
264 debug_log("%s called for %lu\n", __func__,
265 ((struct ide_atapi_pc *) rq->special)->scsi_cmd->serial_number);
266
267 rq->errors |= ERROR_MAX;
268
269 idescsi_end_request(drive, 0, 0);
270
271 return ide_stopped;
272}
273
274static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs) 261static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs)
275{ 262{
276 idescsi_scsi_t *scsi = drive_to_idescsi(drive); 263 idescsi_scsi_t *scsi = drive_to_idescsi(drive);
@@ -524,7 +511,6 @@ static ide_driver_t idescsi_driver = {
524 .do_request = idescsi_do_request, 511 .do_request = idescsi_do_request,
525 .end_request = idescsi_end_request, 512 .end_request = idescsi_end_request,
526 .error = idescsi_atapi_error, 513 .error = idescsi_atapi_error,
527 .abort = idescsi_atapi_abort,
528#ifdef CONFIG_IDE_PROC_FS 514#ifdef CONFIG_IDE_PROC_FS
529 .proc = idescsi_proc, 515 .proc = idescsi_proc,
530#endif 516#endif
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 72b9b2a0eba3..2a2f0094570f 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -64,6 +64,10 @@ MODULE_LICENSE("GPL");
64#define BUG_ON(expr) 64#define BUG_ON(expr)
65#endif 65#endif
66 66
67static struct scsi_transport_template *iscsi_tcp_scsi_transport;
68static struct scsi_host_template iscsi_sht;
69static struct iscsi_transport iscsi_tcp_transport;
70
67static unsigned int iscsi_max_lun = 512; 71static unsigned int iscsi_max_lun = 512;
68module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); 72module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
69 73
@@ -494,39 +498,43 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
494 * must be called with session lock 498 * must be called with session lock
495 */ 499 */
496static void 500static void
497iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 501iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
498{ 502{
499 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 503 struct iscsi_tcp_task *tcp_task = task->dd_data;
500 struct iscsi_r2t_info *r2t; 504 struct iscsi_r2t_info *r2t;
501 505
502 /* flush ctask's r2t queues */ 506 /* nothing to do for mgmt tasks */
503 while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { 507 if (!task->sc)
504 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, 508 return;
509
510 /* flush task's r2t queues */
511 while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
512 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
505 sizeof(void*)); 513 sizeof(void*));
506 debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n"); 514 debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
507 } 515 }
508 516
509 r2t = tcp_ctask->r2t; 517 r2t = tcp_task->r2t;
510 if (r2t != NULL) { 518 if (r2t != NULL) {
511 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, 519 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
512 sizeof(void*)); 520 sizeof(void*));
513 tcp_ctask->r2t = NULL; 521 tcp_task->r2t = NULL;
514 } 522 }
515} 523}
516 524
517/** 525/**
518 * iscsi_data_rsp - SCSI Data-In Response processing 526 * iscsi_data_rsp - SCSI Data-In Response processing
519 * @conn: iscsi connection 527 * @conn: iscsi connection
520 * @ctask: scsi command task 528 * @task: scsi command task
521 **/ 529 **/
522static int 530static int
523iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 531iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
524{ 532{
525 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 533 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
526 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 534 struct iscsi_tcp_task *tcp_task = task->dd_data;
527 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr; 535 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
528 struct iscsi_session *session = conn->session; 536 struct iscsi_session *session = conn->session;
529 struct scsi_cmnd *sc = ctask->sc; 537 struct scsi_cmnd *sc = task->sc;
530 int datasn = be32_to_cpu(rhdr->datasn); 538 int datasn = be32_to_cpu(rhdr->datasn);
531 unsigned total_in_length = scsi_in(sc)->length; 539 unsigned total_in_length = scsi_in(sc)->length;
532 540
@@ -534,18 +542,18 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
534 if (tcp_conn->in.datalen == 0) 542 if (tcp_conn->in.datalen == 0)
535 return 0; 543 return 0;
536 544
537 if (tcp_ctask->exp_datasn != datasn) { 545 if (tcp_task->exp_datasn != datasn) {
538 debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n", 546 debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
539 __FUNCTION__, tcp_ctask->exp_datasn, datasn); 547 __func__, tcp_task->exp_datasn, datasn);
540 return ISCSI_ERR_DATASN; 548 return ISCSI_ERR_DATASN;
541 } 549 }
542 550
543 tcp_ctask->exp_datasn++; 551 tcp_task->exp_datasn++;
544 552
545 tcp_ctask->data_offset = be32_to_cpu(rhdr->offset); 553 tcp_task->data_offset = be32_to_cpu(rhdr->offset);
546 if (tcp_ctask->data_offset + tcp_conn->in.datalen > total_in_length) { 554 if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
547 debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n", 555 debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
548 __FUNCTION__, tcp_ctask->data_offset, 556 __func__, tcp_task->data_offset,
549 tcp_conn->in.datalen, total_in_length); 557 tcp_conn->in.datalen, total_in_length);
550 return ISCSI_ERR_DATA_OFFSET; 558 return ISCSI_ERR_DATA_OFFSET;
551 } 559 }
@@ -574,7 +582,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
574/** 582/**
575 * iscsi_solicit_data_init - initialize first Data-Out 583 * iscsi_solicit_data_init - initialize first Data-Out
576 * @conn: iscsi connection 584 * @conn: iscsi connection
577 * @ctask: scsi command task 585 * @task: scsi command task
578 * @r2t: R2T info 586 * @r2t: R2T info
579 * 587 *
580 * Notes: 588 * Notes:
@@ -584,7 +592,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
584 * This function is called with connection lock taken. 592 * This function is called with connection lock taken.
585 **/ 593 **/
586static void 594static void
587iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 595iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
588 struct iscsi_r2t_info *r2t) 596 struct iscsi_r2t_info *r2t)
589{ 597{
590 struct iscsi_data *hdr; 598 struct iscsi_data *hdr;
@@ -595,8 +603,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
595 hdr->datasn = cpu_to_be32(r2t->solicit_datasn); 603 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
596 r2t->solicit_datasn++; 604 r2t->solicit_datasn++;
597 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; 605 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
598 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun)); 606 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
599 hdr->itt = ctask->hdr->itt; 607 hdr->itt = task->hdr->itt;
600 hdr->exp_statsn = r2t->exp_statsn; 608 hdr->exp_statsn = r2t->exp_statsn;
601 hdr->offset = cpu_to_be32(r2t->data_offset); 609 hdr->offset = cpu_to_be32(r2t->data_offset);
602 if (r2t->data_length > conn->max_xmit_dlength) { 610 if (r2t->data_length > conn->max_xmit_dlength) {
@@ -616,14 +624,14 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
616/** 624/**
617 * iscsi_r2t_rsp - iSCSI R2T Response processing 625 * iscsi_r2t_rsp - iSCSI R2T Response processing
618 * @conn: iscsi connection 626 * @conn: iscsi connection
619 * @ctask: scsi command task 627 * @task: scsi command task
620 **/ 628 **/
621static int 629static int
622iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 630iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
623{ 631{
624 struct iscsi_r2t_info *r2t; 632 struct iscsi_r2t_info *r2t;
625 struct iscsi_session *session = conn->session; 633 struct iscsi_session *session = conn->session;
626 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 634 struct iscsi_tcp_task *tcp_task = task->dd_data;
627 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 635 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
628 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr; 636 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
629 int r2tsn = be32_to_cpu(rhdr->r2tsn); 637 int r2tsn = be32_to_cpu(rhdr->r2tsn);
@@ -636,23 +644,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
636 return ISCSI_ERR_DATALEN; 644 return ISCSI_ERR_DATALEN;
637 } 645 }
638 646
639 if (tcp_ctask->exp_datasn != r2tsn){ 647 if (tcp_task->exp_datasn != r2tsn){
640 debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n", 648 debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
641 __FUNCTION__, tcp_ctask->exp_datasn, r2tsn); 649 __func__, tcp_task->exp_datasn, r2tsn);
642 return ISCSI_ERR_R2TSN; 650 return ISCSI_ERR_R2TSN;
643 } 651 }
644 652
645 /* fill-in new R2T associated with the task */ 653 /* fill-in new R2T associated with the task */
646 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); 654 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
647 655
648 if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) { 656 if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
649 iscsi_conn_printk(KERN_INFO, conn, 657 iscsi_conn_printk(KERN_INFO, conn,
650 "dropping R2T itt %d in recovery.\n", 658 "dropping R2T itt %d in recovery.\n",
651 ctask->itt); 659 task->itt);
652 return 0; 660 return 0;
653 } 661 }
654 662
655 rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); 663 rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
656 BUG_ON(!rc); 664 BUG_ON(!rc);
657 665
658 r2t->exp_statsn = rhdr->statsn; 666 r2t->exp_statsn = rhdr->statsn;
@@ -660,7 +668,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
660 if (r2t->data_length == 0) { 668 if (r2t->data_length == 0) {
661 iscsi_conn_printk(KERN_ERR, conn, 669 iscsi_conn_printk(KERN_ERR, conn,
662 "invalid R2T with zero data len\n"); 670 "invalid R2T with zero data len\n");
663 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, 671 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
664 sizeof(void*)); 672 sizeof(void*));
665 return ISCSI_ERR_DATALEN; 673 return ISCSI_ERR_DATALEN;
666 } 674 }
@@ -671,12 +679,12 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
671 r2t->data_length, session->max_burst); 679 r2t->data_length, session->max_burst);
672 680
673 r2t->data_offset = be32_to_cpu(rhdr->data_offset); 681 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
674 if (r2t->data_offset + r2t->data_length > scsi_out(ctask->sc)->length) { 682 if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
675 iscsi_conn_printk(KERN_ERR, conn, 683 iscsi_conn_printk(KERN_ERR, conn,
676 "invalid R2T with data len %u at offset %u " 684 "invalid R2T with data len %u at offset %u "
677 "and total length %d\n", r2t->data_length, 685 "and total length %d\n", r2t->data_length,
678 r2t->data_offset, scsi_out(ctask->sc)->length); 686 r2t->data_offset, scsi_out(task->sc)->length);
679 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, 687 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
680 sizeof(void*)); 688 sizeof(void*));
681 return ISCSI_ERR_DATALEN; 689 return ISCSI_ERR_DATALEN;
682 } 690 }
@@ -684,13 +692,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
684 r2t->ttt = rhdr->ttt; /* no flip */ 692 r2t->ttt = rhdr->ttt; /* no flip */
685 r2t->solicit_datasn = 0; 693 r2t->solicit_datasn = 0;
686 694
687 iscsi_solicit_data_init(conn, ctask, r2t); 695 iscsi_solicit_data_init(conn, task, r2t);
688 696
689 tcp_ctask->exp_datasn = r2tsn + 1; 697 tcp_task->exp_datasn = r2tsn + 1;
690 __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); 698 __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
691 conn->r2t_pdus_cnt++; 699 conn->r2t_pdus_cnt++;
692 700
693 iscsi_requeue_ctask(ctask); 701 iscsi_requeue_task(task);
694 return 0; 702 return 0;
695} 703}
696 704
@@ -733,10 +741,8 @@ static int
733iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) 741iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
734{ 742{
735 int rc = 0, opcode, ahslen; 743 int rc = 0, opcode, ahslen;
736 struct iscsi_session *session = conn->session;
737 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 744 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
738 struct iscsi_cmd_task *ctask; 745 struct iscsi_task *task;
739 uint32_t itt;
740 746
741 /* verify PDU length */ 747 /* verify PDU length */
742 tcp_conn->in.datalen = ntoh24(hdr->dlength); 748 tcp_conn->in.datalen = ntoh24(hdr->dlength);
@@ -754,7 +760,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
754 760
755 opcode = hdr->opcode & ISCSI_OPCODE_MASK; 761 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
756 /* verify itt (itt encoding: age+cid+itt) */ 762 /* verify itt (itt encoding: age+cid+itt) */
757 rc = iscsi_verify_itt(conn, hdr, &itt); 763 rc = iscsi_verify_itt(conn, hdr->itt);
758 if (rc) 764 if (rc)
759 return rc; 765 return rc;
760 766
@@ -763,16 +769,21 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
763 769
764 switch(opcode) { 770 switch(opcode) {
765 case ISCSI_OP_SCSI_DATA_IN: 771 case ISCSI_OP_SCSI_DATA_IN:
766 ctask = session->cmds[itt];
767 spin_lock(&conn->session->lock); 772 spin_lock(&conn->session->lock);
768 rc = iscsi_data_rsp(conn, ctask); 773 task = iscsi_itt_to_ctask(conn, hdr->itt);
769 spin_unlock(&conn->session->lock); 774 if (!task)
770 if (rc) 775 rc = ISCSI_ERR_BAD_ITT;
771 return rc; 776 else
777 rc = iscsi_data_rsp(conn, task);
778 if (rc) {
779 spin_unlock(&conn->session->lock);
780 break;
781 }
782
772 if (tcp_conn->in.datalen) { 783 if (tcp_conn->in.datalen) {
773 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 784 struct iscsi_tcp_task *tcp_task = task->dd_data;
774 struct hash_desc *rx_hash = NULL; 785 struct hash_desc *rx_hash = NULL;
775 struct scsi_data_buffer *sdb = scsi_in(ctask->sc); 786 struct scsi_data_buffer *sdb = scsi_in(task->sc);
776 787
777 /* 788 /*
778 * Setup copy of Data-In into the Scsi_Cmnd 789 * Setup copy of Data-In into the Scsi_Cmnd
@@ -787,17 +798,21 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
787 798
788 debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, " 799 debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
789 "datalen=%d)\n", tcp_conn, 800 "datalen=%d)\n", tcp_conn,
790 tcp_ctask->data_offset, 801 tcp_task->data_offset,
791 tcp_conn->in.datalen); 802 tcp_conn->in.datalen);
792 return iscsi_segment_seek_sg(&tcp_conn->in.segment, 803 rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
793 sdb->table.sgl, 804 sdb->table.sgl,
794 sdb->table.nents, 805 sdb->table.nents,
795 tcp_ctask->data_offset, 806 tcp_task->data_offset,
796 tcp_conn->in.datalen, 807 tcp_conn->in.datalen,
797 iscsi_tcp_process_data_in, 808 iscsi_tcp_process_data_in,
798 rx_hash); 809 rx_hash);
810 spin_unlock(&conn->session->lock);
811 return rc;
799 } 812 }
800 /* fall through */ 813 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
814 spin_unlock(&conn->session->lock);
815 break;
801 case ISCSI_OP_SCSI_CMD_RSP: 816 case ISCSI_OP_SCSI_CMD_RSP:
802 if (tcp_conn->in.datalen) { 817 if (tcp_conn->in.datalen) {
803 iscsi_tcp_data_recv_prep(tcp_conn); 818 iscsi_tcp_data_recv_prep(tcp_conn);
@@ -806,15 +821,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
806 rc = iscsi_complete_pdu(conn, hdr, NULL, 0); 821 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
807 break; 822 break;
808 case ISCSI_OP_R2T: 823 case ISCSI_OP_R2T:
809 ctask = session->cmds[itt]; 824 spin_lock(&conn->session->lock);
810 if (ahslen) 825 task = iscsi_itt_to_ctask(conn, hdr->itt);
826 if (!task)
827 rc = ISCSI_ERR_BAD_ITT;
828 else if (ahslen)
811 rc = ISCSI_ERR_AHSLEN; 829 rc = ISCSI_ERR_AHSLEN;
812 else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) { 830 else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
813 spin_lock(&session->lock); 831 rc = iscsi_r2t_rsp(conn, task);
814 rc = iscsi_r2t_rsp(conn, ctask); 832 else
815 spin_unlock(&session->lock);
816 } else
817 rc = ISCSI_ERR_PROTO; 833 rc = ISCSI_ERR_PROTO;
834 spin_unlock(&conn->session->lock);
818 break; 835 break;
819 case ISCSI_OP_LOGIN_RSP: 836 case ISCSI_OP_LOGIN_RSP:
820 case ISCSI_OP_TEXT_RSP: 837 case ISCSI_OP_TEXT_RSP:
@@ -1176,7 +1193,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
1176{ 1193{
1177 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1194 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1178 1195
1179 debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn, 1196 debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
1180 conn->hdrdgst_en? ", digest enabled" : ""); 1197 conn->hdrdgst_en? ", digest enabled" : "");
1181 1198
1182 /* Clear the data segment - needs to be filled in by the 1199 /* Clear the data segment - needs to be filled in by the
@@ -1185,7 +1202,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
1185 1202
1186 /* If header digest is enabled, compute the CRC and 1203 /* If header digest is enabled, compute the CRC and
1187 * place the digest into the same buffer. We make 1204 * place the digest into the same buffer. We make
1188 * sure that both iscsi_tcp_ctask and mtask have 1205 * sure that both iscsi_tcp_task and mtask have
1189 * sufficient room. 1206 * sufficient room.
1190 */ 1207 */
1191 if (conn->hdrdgst_en) { 1208 if (conn->hdrdgst_en) {
@@ -1217,7 +1234,7 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
1217 struct hash_desc *tx_hash = NULL; 1234 struct hash_desc *tx_hash = NULL;
1218 unsigned int hdr_spec_len; 1235 unsigned int hdr_spec_len;
1219 1236
1220 debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__, 1237 debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
1221 tcp_conn, offset, len, 1238 tcp_conn, offset, len,
1222 conn->datadgst_en? ", digest enabled" : ""); 1239 conn->datadgst_en? ", digest enabled" : "");
1223 1240
@@ -1242,7 +1259,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
1242 struct hash_desc *tx_hash = NULL; 1259 struct hash_desc *tx_hash = NULL;
1243 unsigned int hdr_spec_len; 1260 unsigned int hdr_spec_len;
1244 1261
1245 debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len, 1262 debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
1246 conn->datadgst_en? ", digest enabled" : ""); 1263 conn->datadgst_en? ", digest enabled" : "");
1247 1264
1248 /* Make sure the datalen matches what the caller 1265 /* Make sure the datalen matches what the caller
@@ -1260,7 +1277,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
1260/** 1277/**
1261 * iscsi_solicit_data_cont - initialize next Data-Out 1278 * iscsi_solicit_data_cont - initialize next Data-Out
1262 * @conn: iscsi connection 1279 * @conn: iscsi connection
1263 * @ctask: scsi command task 1280 * @task: scsi command task
1264 * @r2t: R2T info 1281 * @r2t: R2T info
1265 * @left: bytes left to transfer 1282 * @left: bytes left to transfer
1266 * 1283 *
@@ -1271,7 +1288,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
1271 * Called under connection lock. 1288 * Called under connection lock.
1272 **/ 1289 **/
1273static int 1290static int
1274iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 1291iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
1275 struct iscsi_r2t_info *r2t) 1292 struct iscsi_r2t_info *r2t)
1276{ 1293{
1277 struct iscsi_data *hdr; 1294 struct iscsi_data *hdr;
@@ -1288,8 +1305,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1288 hdr->datasn = cpu_to_be32(r2t->solicit_datasn); 1305 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
1289 r2t->solicit_datasn++; 1306 r2t->solicit_datasn++;
1290 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; 1307 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1291 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun)); 1308 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
1292 hdr->itt = ctask->hdr->itt; 1309 hdr->itt = task->hdr->itt;
1293 hdr->exp_statsn = r2t->exp_statsn; 1310 hdr->exp_statsn = r2t->exp_statsn;
1294 new_offset = r2t->data_offset + r2t->sent; 1311 new_offset = r2t->data_offset + r2t->sent;
1295 hdr->offset = cpu_to_be32(new_offset); 1312 hdr->offset = cpu_to_be32(new_offset);
@@ -1307,89 +1324,76 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1307} 1324}
1308 1325
1309/** 1326/**
1310 * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands 1327 * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
1311 * @conn: iscsi connection 1328 * @conn: iscsi connection
1312 * @ctask: scsi command task 1329 * @task: scsi command task
1313 * @sc: scsi command 1330 * @sc: scsi command
1314 **/ 1331 **/
1315static int 1332static int
1316iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask) 1333iscsi_tcp_task_init(struct iscsi_task *task)
1317{ 1334{
1318 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1335 struct iscsi_tcp_task *tcp_task = task->dd_data;
1319 struct iscsi_conn *conn = ctask->conn; 1336 struct iscsi_conn *conn = task->conn;
1320 struct scsi_cmnd *sc = ctask->sc; 1337 struct scsi_cmnd *sc = task->sc;
1321 int err; 1338 int err;
1322 1339
1323 BUG_ON(__kfifo_len(tcp_ctask->r2tqueue)); 1340 if (!sc) {
1324 tcp_ctask->sent = 0; 1341 /*
1325 tcp_ctask->exp_datasn = 0; 1342 * mgmt tasks do not have a scatterlist since they come
1343 * in from the iscsi interface.
1344 */
1345 debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
1346 task->itt);
1347
1348 /* Prepare PDU, optionally w/ immediate data */
1349 iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
1350
1351 /* If we have immediate data, attach a payload */
1352 if (task->data_count)
1353 iscsi_tcp_send_linear_data_prepare(conn, task->data,
1354 task->data_count);
1355 return 0;
1356 }
1357
1358 BUG_ON(__kfifo_len(tcp_task->r2tqueue));
1359 tcp_task->sent = 0;
1360 tcp_task->exp_datasn = 0;
1326 1361
1327 /* Prepare PDU, optionally w/ immediate data */ 1362 /* Prepare PDU, optionally w/ immediate data */
1328 debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n", 1363 debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
1329 conn->id, ctask->itt, ctask->imm_count, 1364 conn->id, task->itt, task->imm_count,
1330 ctask->unsol_count); 1365 task->unsol_count);
1331 iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len); 1366 iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
1332 1367
1333 if (!ctask->imm_count) 1368 if (!task->imm_count)
1334 return 0; 1369 return 0;
1335 1370
1336 /* If we have immediate data, attach a payload */ 1371 /* If we have immediate data, attach a payload */
1337 err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl, 1372 err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
1338 scsi_out(sc)->table.nents, 1373 scsi_out(sc)->table.nents,
1339 0, ctask->imm_count); 1374 0, task->imm_count);
1340 if (err) 1375 if (err)
1341 return err; 1376 return err;
1342 tcp_ctask->sent += ctask->imm_count; 1377 tcp_task->sent += task->imm_count;
1343 ctask->imm_count = 0; 1378 task->imm_count = 0;
1344 return 0;
1345}
1346
1347/**
1348 * iscsi_tcp_mtask_xmit - xmit management(immediate) task
1349 * @conn: iscsi connection
1350 * @mtask: task management task
1351 *
1352 * Notes:
1353 * The function can return -EAGAIN in which case caller must
1354 * call it again later, or recover. '0' return code means successful
1355 * xmit.
1356 **/
1357static int
1358iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1359{
1360 int rc;
1361
1362 /* Flush any pending data first. */
1363 rc = iscsi_tcp_flush(conn);
1364 if (rc < 0)
1365 return rc;
1366
1367 if (mtask->hdr->itt == RESERVED_ITT) {
1368 struct iscsi_session *session = conn->session;
1369
1370 spin_lock_bh(&session->lock);
1371 iscsi_free_mgmt_task(conn, mtask);
1372 spin_unlock_bh(&session->lock);
1373 }
1374
1375 return 0; 1379 return 0;
1376} 1380}
1377 1381
1378/* 1382/*
1379 * iscsi_tcp_ctask_xmit - xmit normal PDU task 1383 * iscsi_tcp_task_xmit - xmit normal PDU task
1380 * @conn: iscsi connection 1384 * @task: iscsi command task
1381 * @ctask: iscsi command task
1382 * 1385 *
1383 * We're expected to return 0 when everything was transmitted succesfully, 1386 * We're expected to return 0 when everything was transmitted succesfully,
1384 * -EAGAIN if there's still data in the queue, or != 0 for any other kind 1387 * -EAGAIN if there's still data in the queue, or != 0 for any other kind
1385 * of error. 1388 * of error.
1386 */ 1389 */
1387static int 1390static int
1388iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1391iscsi_tcp_task_xmit(struct iscsi_task *task)
1389{ 1392{
1390 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1393 struct iscsi_conn *conn = task->conn;
1391 struct scsi_cmnd *sc = ctask->sc; 1394 struct iscsi_tcp_task *tcp_task = task->dd_data;
1392 struct scsi_data_buffer *sdb = scsi_out(sc); 1395 struct scsi_cmnd *sc = task->sc;
1396 struct scsi_data_buffer *sdb;
1393 int rc = 0; 1397 int rc = 0;
1394 1398
1395flush: 1399flush:
@@ -1398,31 +1402,39 @@ flush:
1398 if (rc < 0) 1402 if (rc < 0)
1399 return rc; 1403 return rc;
1400 1404
1405 /* mgmt command */
1406 if (!sc) {
1407 if (task->hdr->itt == RESERVED_ITT)
1408 iscsi_put_task(task);
1409 return 0;
1410 }
1411
1401 /* Are we done already? */ 1412 /* Are we done already? */
1402 if (sc->sc_data_direction != DMA_TO_DEVICE) 1413 if (sc->sc_data_direction != DMA_TO_DEVICE)
1403 return 0; 1414 return 0;
1404 1415
1405 if (ctask->unsol_count != 0) { 1416 sdb = scsi_out(sc);
1406 struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr; 1417 if (task->unsol_count != 0) {
1418 struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
1407 1419
1408 /* Prepare a header for the unsolicited PDU. 1420 /* Prepare a header for the unsolicited PDU.
1409 * The amount of data we want to send will be 1421 * The amount of data we want to send will be
1410 * in ctask->data_count. 1422 * in task->data_count.
1411 * FIXME: return the data count instead. 1423 * FIXME: return the data count instead.
1412 */ 1424 */
1413 iscsi_prep_unsolicit_data_pdu(ctask, hdr); 1425 iscsi_prep_unsolicit_data_pdu(task, hdr);
1414 1426
1415 debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n", 1427 debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
1416 ctask->itt, tcp_ctask->sent, ctask->data_count); 1428 task->itt, tcp_task->sent, task->data_count);
1417 1429
1418 iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr)); 1430 iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
1419 rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl, 1431 rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
1420 sdb->table.nents, tcp_ctask->sent, 1432 sdb->table.nents, tcp_task->sent,
1421 ctask->data_count); 1433 task->data_count);
1422 if (rc) 1434 if (rc)
1423 goto fail; 1435 goto fail;
1424 tcp_ctask->sent += ctask->data_count; 1436 tcp_task->sent += task->data_count;
1425 ctask->unsol_count -= ctask->data_count; 1437 task->unsol_count -= task->data_count;
1426 goto flush; 1438 goto flush;
1427 } else { 1439 } else {
1428 struct iscsi_session *session = conn->session; 1440 struct iscsi_session *session = conn->session;
@@ -1431,22 +1443,22 @@ flush:
1431 /* All unsolicited PDUs sent. Check for solicited PDUs. 1443 /* All unsolicited PDUs sent. Check for solicited PDUs.
1432 */ 1444 */
1433 spin_lock_bh(&session->lock); 1445 spin_lock_bh(&session->lock);
1434 r2t = tcp_ctask->r2t; 1446 r2t = tcp_task->r2t;
1435 if (r2t != NULL) { 1447 if (r2t != NULL) {
1436 /* Continue with this R2T? */ 1448 /* Continue with this R2T? */
1437 if (!iscsi_solicit_data_cont(conn, ctask, r2t)) { 1449 if (!iscsi_solicit_data_cont(conn, task, r2t)) {
1438 debug_scsi(" done with r2t %p\n", r2t); 1450 debug_scsi(" done with r2t %p\n", r2t);
1439 1451
1440 __kfifo_put(tcp_ctask->r2tpool.queue, 1452 __kfifo_put(tcp_task->r2tpool.queue,
1441 (void*)&r2t, sizeof(void*)); 1453 (void*)&r2t, sizeof(void*));
1442 tcp_ctask->r2t = r2t = NULL; 1454 tcp_task->r2t = r2t = NULL;
1443 } 1455 }
1444 } 1456 }
1445 1457
1446 if (r2t == NULL) { 1458 if (r2t == NULL) {
1447 __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t, 1459 __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
1448 sizeof(void*)); 1460 sizeof(void*));
1449 r2t = tcp_ctask->r2t; 1461 r2t = tcp_task->r2t;
1450 } 1462 }
1451 spin_unlock_bh(&session->lock); 1463 spin_unlock_bh(&session->lock);
1452 1464
@@ -1457,7 +1469,7 @@ flush:
1457 } 1469 }
1458 1470
1459 debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n", 1471 debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
1460 r2t, r2t->solicit_datasn - 1, ctask->itt, 1472 r2t, r2t->solicit_datasn - 1, task->itt,
1461 r2t->data_offset + r2t->sent, r2t->data_count); 1473 r2t->data_offset + r2t->sent, r2t->data_count);
1462 1474
1463 iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr, 1475 iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
@@ -1469,7 +1481,7 @@ flush:
1469 r2t->data_count); 1481 r2t->data_count);
1470 if (rc) 1482 if (rc)
1471 goto fail; 1483 goto fail;
1472 tcp_ctask->sent += r2t->data_count; 1484 tcp_task->sent += r2t->data_count;
1473 r2t->sent += r2t->data_count; 1485 r2t->sent += r2t->data_count;
1474 goto flush; 1486 goto flush;
1475 } 1487 }
@@ -1486,7 +1498,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1486 struct iscsi_cls_conn *cls_conn; 1498 struct iscsi_cls_conn *cls_conn;
1487 struct iscsi_tcp_conn *tcp_conn; 1499 struct iscsi_tcp_conn *tcp_conn;
1488 1500
1489 cls_conn = iscsi_conn_setup(cls_session, conn_idx); 1501 cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
1490 if (!cls_conn) 1502 if (!cls_conn)
1491 return NULL; 1503 return NULL;
1492 conn = cls_conn->dd_data; 1504 conn = cls_conn->dd_data;
@@ -1496,18 +1508,14 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1496 */ 1508 */
1497 conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN; 1509 conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
1498 1510
1499 tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL); 1511 tcp_conn = conn->dd_data;
1500 if (!tcp_conn)
1501 goto tcp_conn_alloc_fail;
1502
1503 conn->dd_data = tcp_conn;
1504 tcp_conn->iscsi_conn = conn; 1512 tcp_conn->iscsi_conn = conn;
1505 1513
1506 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1514 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1507 CRYPTO_ALG_ASYNC); 1515 CRYPTO_ALG_ASYNC);
1508 tcp_conn->tx_hash.flags = 0; 1516 tcp_conn->tx_hash.flags = 0;
1509 if (IS_ERR(tcp_conn->tx_hash.tfm)) 1517 if (IS_ERR(tcp_conn->tx_hash.tfm))
1510 goto free_tcp_conn; 1518 goto free_conn;
1511 1519
1512 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1520 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1513 CRYPTO_ALG_ASYNC); 1521 CRYPTO_ALG_ASYNC);
@@ -1519,14 +1527,12 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1519 1527
1520free_tx_tfm: 1528free_tx_tfm:
1521 crypto_free_hash(tcp_conn->tx_hash.tfm); 1529 crypto_free_hash(tcp_conn->tx_hash.tfm);
1522free_tcp_conn: 1530free_conn:
1523 iscsi_conn_printk(KERN_ERR, conn, 1531 iscsi_conn_printk(KERN_ERR, conn,
1524 "Could not create connection due to crc32c " 1532 "Could not create connection due to crc32c "
1525 "loading error. Make sure the crc32c " 1533 "loading error. Make sure the crc32c "
1526 "module is built as a module or into the " 1534 "module is built as a module or into the "
1527 "kernel\n"); 1535 "kernel\n");
1528 kfree(tcp_conn);
1529tcp_conn_alloc_fail:
1530 iscsi_conn_teardown(cls_conn); 1536 iscsi_conn_teardown(cls_conn);
1531 return NULL; 1537 return NULL;
1532} 1538}
@@ -1547,7 +1553,6 @@ iscsi_tcp_release_conn(struct iscsi_conn *conn)
1547 1553
1548 spin_lock_bh(&session->lock); 1554 spin_lock_bh(&session->lock);
1549 tcp_conn->sock = NULL; 1555 tcp_conn->sock = NULL;
1550 conn->recv_lock = NULL;
1551 spin_unlock_bh(&session->lock); 1556 spin_unlock_bh(&session->lock);
1552 sockfd_put(sock); 1557 sockfd_put(sock);
1553} 1558}
@@ -1559,20 +1564,32 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1559 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1564 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1560 1565
1561 iscsi_tcp_release_conn(conn); 1566 iscsi_tcp_release_conn(conn);
1562 iscsi_conn_teardown(cls_conn);
1563 1567
1564 if (tcp_conn->tx_hash.tfm) 1568 if (tcp_conn->tx_hash.tfm)
1565 crypto_free_hash(tcp_conn->tx_hash.tfm); 1569 crypto_free_hash(tcp_conn->tx_hash.tfm);
1566 if (tcp_conn->rx_hash.tfm) 1570 if (tcp_conn->rx_hash.tfm)
1567 crypto_free_hash(tcp_conn->rx_hash.tfm); 1571 crypto_free_hash(tcp_conn->rx_hash.tfm);
1568 1572
1569 kfree(tcp_conn); 1573 iscsi_conn_teardown(cls_conn);
1570} 1574}
1571 1575
1572static void 1576static void
1573iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) 1577iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
1574{ 1578{
1575 struct iscsi_conn *conn = cls_conn->dd_data; 1579 struct iscsi_conn *conn = cls_conn->dd_data;
1580 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1581
1582 /* userspace may have goofed up and not bound us */
1583 if (!tcp_conn->sock)
1584 return;
1585 /*
1586 * Make sure our recv side is stopped.
1587 * Older tools called conn stop before ep_disconnect
1588 * so IO could still be coming in.
1589 */
1590 write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
1591 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1592 write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
1576 1593
1577 iscsi_conn_stop(cls_conn, flag); 1594 iscsi_conn_stop(cls_conn, flag);
1578 iscsi_tcp_release_conn(conn); 1595 iscsi_tcp_release_conn(conn);
@@ -1623,6 +1640,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1623 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, 1640 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
1624 int is_leading) 1641 int is_leading)
1625{ 1642{
1643 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1644 struct iscsi_host *ihost = shost_priv(shost);
1626 struct iscsi_conn *conn = cls_conn->dd_data; 1645 struct iscsi_conn *conn = cls_conn->dd_data;
1627 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1646 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1628 struct sock *sk; 1647 struct sock *sk;
@@ -1646,8 +1665,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1646 if (err) 1665 if (err)
1647 goto free_socket; 1666 goto free_socket;
1648 1667
1649 err = iscsi_tcp_get_addr(conn, sock, conn->local_address, 1668 err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
1650 &conn->local_port, kernel_getsockname); 1669 &ihost->local_port, kernel_getsockname);
1651 if (err) 1670 if (err)
1652 goto free_socket; 1671 goto free_socket;
1653 1672
@@ -1664,13 +1683,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1664 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */ 1683 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
1665 sk->sk_allocation = GFP_ATOMIC; 1684 sk->sk_allocation = GFP_ATOMIC;
1666 1685
1667 /* FIXME: disable Nagle's algorithm */
1668
1669 /*
1670 * Intercept TCP callbacks for sendfile like receive
1671 * processing.
1672 */
1673 conn->recv_lock = &sk->sk_callback_lock;
1674 iscsi_conn_set_callbacks(conn); 1686 iscsi_conn_set_callbacks(conn);
1675 tcp_conn->sendpage = tcp_conn->sock->ops->sendpage; 1687 tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
1676 /* 1688 /*
@@ -1684,21 +1696,6 @@ free_socket:
1684 return err; 1696 return err;
1685} 1697}
1686 1698
1687/* called with host lock */
1688static void
1689iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1690{
1691 debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
1692
1693 /* Prepare PDU, optionally w/ immediate data */
1694 iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
1695
1696 /* If we have immediate data, attach a payload */
1697 if (mtask->data_count)
1698 iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
1699 mtask->data_count);
1700}
1701
1702static int 1699static int
1703iscsi_r2tpool_alloc(struct iscsi_session *session) 1700iscsi_r2tpool_alloc(struct iscsi_session *session)
1704{ 1701{
@@ -1709,8 +1706,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
1709 * initialize per-task: R2T pool and xmit queue 1706 * initialize per-task: R2T pool and xmit queue
1710 */ 1707 */
1711 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { 1708 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1712 struct iscsi_cmd_task *ctask = session->cmds[cmd_i]; 1709 struct iscsi_task *task = session->cmds[cmd_i];
1713 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1710 struct iscsi_tcp_task *tcp_task = task->dd_data;
1714 1711
1715 /* 1712 /*
1716 * pre-allocated x4 as much r2ts to handle race when 1713 * pre-allocated x4 as much r2ts to handle race when
@@ -1719,16 +1716,16 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
1719 */ 1716 */
1720 1717
1721 /* R2T pool */ 1718 /* R2T pool */
1722 if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL, 1719 if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
1723 sizeof(struct iscsi_r2t_info))) { 1720 sizeof(struct iscsi_r2t_info))) {
1724 goto r2t_alloc_fail; 1721 goto r2t_alloc_fail;
1725 } 1722 }
1726 1723
1727 /* R2T xmit queue */ 1724 /* R2T xmit queue */
1728 tcp_ctask->r2tqueue = kfifo_alloc( 1725 tcp_task->r2tqueue = kfifo_alloc(
1729 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL); 1726 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
1730 if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) { 1727 if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
1731 iscsi_pool_free(&tcp_ctask->r2tpool); 1728 iscsi_pool_free(&tcp_task->r2tpool);
1732 goto r2t_alloc_fail; 1729 goto r2t_alloc_fail;
1733 } 1730 }
1734 } 1731 }
@@ -1737,11 +1734,11 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
1737 1734
1738r2t_alloc_fail: 1735r2t_alloc_fail:
1739 for (i = 0; i < cmd_i; i++) { 1736 for (i = 0; i < cmd_i; i++) {
1740 struct iscsi_cmd_task *ctask = session->cmds[i]; 1737 struct iscsi_task *task = session->cmds[i];
1741 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1738 struct iscsi_tcp_task *tcp_task = task->dd_data;
1742 1739
1743 kfifo_free(tcp_ctask->r2tqueue); 1740 kfifo_free(tcp_task->r2tqueue);
1744 iscsi_pool_free(&tcp_ctask->r2tpool); 1741 iscsi_pool_free(&tcp_task->r2tpool);
1745 } 1742 }
1746 return -ENOMEM; 1743 return -ENOMEM;
1747} 1744}
@@ -1752,11 +1749,11 @@ iscsi_r2tpool_free(struct iscsi_session *session)
1752 int i; 1749 int i;
1753 1750
1754 for (i = 0; i < session->cmds_max; i++) { 1751 for (i = 0; i < session->cmds_max; i++) {
1755 struct iscsi_cmd_task *ctask = session->cmds[i]; 1752 struct iscsi_task *task = session->cmds[i];
1756 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1753 struct iscsi_tcp_task *tcp_task = task->dd_data;
1757 1754
1758 kfifo_free(tcp_ctask->r2tqueue); 1755 kfifo_free(tcp_task->r2tqueue);
1759 iscsi_pool_free(&tcp_ctask->r2tpool); 1756 iscsi_pool_free(&tcp_task->r2tpool);
1760 } 1757 }
1761} 1758}
1762 1759
@@ -1821,29 +1818,6 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
1821 return len; 1818 return len;
1822} 1819}
1823 1820
1824static int
1825iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
1826 char *buf)
1827{
1828 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
1829 int len;
1830
1831 switch (param) {
1832 case ISCSI_HOST_PARAM_IPADDRESS:
1833 spin_lock_bh(&session->lock);
1834 if (!session->leadconn)
1835 len = -ENODEV;
1836 else
1837 len = sprintf(buf, "%s\n",
1838 session->leadconn->local_address);
1839 spin_unlock_bh(&session->lock);
1840 break;
1841 default:
1842 return iscsi_host_get_param(shost, param, buf);
1843 }
1844 return len;
1845}
1846
1847static void 1821static void
1848iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) 1822iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
1849{ 1823{
@@ -1869,54 +1843,70 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
1869} 1843}
1870 1844
1871static struct iscsi_cls_session * 1845static struct iscsi_cls_session *
1872iscsi_tcp_session_create(struct iscsi_transport *iscsit, 1846iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
1873 struct scsi_transport_template *scsit, 1847 uint16_t qdepth, uint32_t initial_cmdsn,
1874 uint16_t cmds_max, uint16_t qdepth, 1848 uint32_t *hostno)
1875 uint32_t initial_cmdsn, uint32_t *hostno)
1876{ 1849{
1877 struct iscsi_cls_session *cls_session; 1850 struct iscsi_cls_session *cls_session;
1878 struct iscsi_session *session; 1851 struct iscsi_session *session;
1879 uint32_t hn; 1852 struct Scsi_Host *shost;
1880 int cmd_i; 1853 int cmd_i;
1881 1854
1882 cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth, 1855 if (ep) {
1883 sizeof(struct iscsi_tcp_cmd_task), 1856 printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
1884 sizeof(struct iscsi_tcp_mgmt_task),
1885 initial_cmdsn, &hn);
1886 if (!cls_session)
1887 return NULL; 1857 return NULL;
1888 *hostno = hn;
1889
1890 session = class_to_transport_session(cls_session);
1891 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1892 struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
1893 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1894
1895 ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
1896 ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
1897 } 1858 }
1898 1859
1899 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) { 1860 shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
1900 struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i]; 1861 if (!shost)
1901 struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; 1862 return NULL;
1863 shost->transportt = iscsi_tcp_scsi_transport;
1864 shost->max_lun = iscsi_max_lun;
1865 shost->max_id = 0;
1866 shost->max_channel = 0;
1867 shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
1868
1869 if (iscsi_host_add(shost, NULL))
1870 goto free_host;
1871 *hostno = shost->host_no;
1872
1873 cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
1874 sizeof(struct iscsi_tcp_task),
1875 initial_cmdsn, 0);
1876 if (!cls_session)
1877 goto remove_host;
1878 session = cls_session->dd_data;
1879
1880 shost->can_queue = session->scsi_cmds_max;
1881 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1882 struct iscsi_task *task = session->cmds[cmd_i];
1883 struct iscsi_tcp_task *tcp_task = task->dd_data;
1902 1884
1903 mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr; 1885 task->hdr = &tcp_task->hdr.cmd_hdr;
1886 task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
1904 } 1887 }
1905 1888
1906 if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session))) 1889 if (iscsi_r2tpool_alloc(session))
1907 goto r2tpool_alloc_fail; 1890 goto remove_session;
1908
1909 return cls_session; 1891 return cls_session;
1910 1892
1911r2tpool_alloc_fail: 1893remove_session:
1912 iscsi_session_teardown(cls_session); 1894 iscsi_session_teardown(cls_session);
1895remove_host:
1896 iscsi_host_remove(shost);
1897free_host:
1898 iscsi_host_free(shost);
1913 return NULL; 1899 return NULL;
1914} 1900}
1915 1901
1916static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session) 1902static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
1917{ 1903{
1918 iscsi_r2tpool_free(class_to_transport_session(cls_session)); 1904 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1919 iscsi_session_teardown(cls_session); 1905
1906 iscsi_r2tpool_free(cls_session->dd_data);
1907
1908 iscsi_host_remove(shost);
1909 iscsi_host_free(shost);
1920} 1910}
1921 1911
1922static int iscsi_tcp_slave_configure(struct scsi_device *sdev) 1912static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
@@ -1971,14 +1961,11 @@ static struct iscsi_transport iscsi_tcp_transport = {
1971 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | 1961 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
1972 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | 1962 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
1973 ISCSI_LU_RESET_TMO | 1963 ISCSI_LU_RESET_TMO |
1974 ISCSI_PING_TMO | ISCSI_RECV_TMO, 1964 ISCSI_PING_TMO | ISCSI_RECV_TMO |
1965 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
1975 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | 1966 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
1976 ISCSI_HOST_INITIATOR_NAME | 1967 ISCSI_HOST_INITIATOR_NAME |
1977 ISCSI_HOST_NETDEV_NAME, 1968 ISCSI_HOST_NETDEV_NAME,
1978 .host_template = &iscsi_sht,
1979 .conndata_size = sizeof(struct iscsi_conn),
1980 .max_conn = 1,
1981 .max_cmd_len = 16,
1982 /* session management */ 1969 /* session management */
1983 .create_session = iscsi_tcp_session_create, 1970 .create_session = iscsi_tcp_session_create,
1984 .destroy_session = iscsi_tcp_session_destroy, 1971 .destroy_session = iscsi_tcp_session_destroy,
@@ -1992,16 +1979,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
1992 .start_conn = iscsi_conn_start, 1979 .start_conn = iscsi_conn_start,
1993 .stop_conn = iscsi_tcp_conn_stop, 1980 .stop_conn = iscsi_tcp_conn_stop,
1994 /* iscsi host params */ 1981 /* iscsi host params */
1995 .get_host_param = iscsi_tcp_host_get_param, 1982 .get_host_param = iscsi_host_get_param,
1996 .set_host_param = iscsi_host_set_param, 1983 .set_host_param = iscsi_host_set_param,
1997 /* IO */ 1984 /* IO */
1998 .send_pdu = iscsi_conn_send_pdu, 1985 .send_pdu = iscsi_conn_send_pdu,
1999 .get_stats = iscsi_conn_get_stats, 1986 .get_stats = iscsi_conn_get_stats,
2000 .init_cmd_task = iscsi_tcp_ctask_init, 1987 .init_task = iscsi_tcp_task_init,
2001 .init_mgmt_task = iscsi_tcp_mtask_init, 1988 .xmit_task = iscsi_tcp_task_xmit,
2002 .xmit_cmd_task = iscsi_tcp_ctask_xmit, 1989 .cleanup_task = iscsi_tcp_cleanup_task,
2003 .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
2004 .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
2005 /* recovery */ 1990 /* recovery */
2006 .session_recovery_timedout = iscsi_session_recovery_timedout, 1991 .session_recovery_timedout = iscsi_session_recovery_timedout,
2007}; 1992};
@@ -2014,9 +1999,10 @@ iscsi_tcp_init(void)
2014 iscsi_max_lun); 1999 iscsi_max_lun);
2015 return -EINVAL; 2000 return -EINVAL;
2016 } 2001 }
2017 iscsi_tcp_transport.max_lun = iscsi_max_lun;
2018 2002
2019 if (!iscsi_register_transport(&iscsi_tcp_transport)) 2003 iscsi_tcp_scsi_transport = iscsi_register_transport(
2004 &iscsi_tcp_transport);
2005 if (!iscsi_tcp_scsi_transport)
2020 return -ENODEV; 2006 return -ENODEV;
2021 2007
2022 return 0; 2008 return 0;
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index ed0b991d1e72..498d8ca39848 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -103,11 +103,6 @@ struct iscsi_data_task {
103 char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */ 103 char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
104}; 104};
105 105
106struct iscsi_tcp_mgmt_task {
107 struct iscsi_hdr hdr;
108 char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
109};
110
111struct iscsi_r2t_info { 106struct iscsi_r2t_info {
112 __be32 ttt; /* copied from R2T */ 107 __be32 ttt; /* copied from R2T */
113 __be32 exp_statsn; /* copied from R2T */ 108 __be32 exp_statsn; /* copied from R2T */
@@ -119,7 +114,7 @@ struct iscsi_r2t_info {
119 struct iscsi_data_task dtask; /* Data-Out header buf */ 114 struct iscsi_data_task dtask; /* Data-Out header buf */
120}; 115};
121 116
122struct iscsi_tcp_cmd_task { 117struct iscsi_tcp_task {
123 struct iscsi_hdr_buff { 118 struct iscsi_hdr_buff {
124 struct iscsi_cmd cmd_hdr; 119 struct iscsi_cmd cmd_hdr;
125 char hdrextbuf[ISCSI_MAX_AHS_SIZE + 120 char hdrextbuf[ISCSI_MAX_AHS_SIZE +
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index b43bf1d60dac..299e075a7b34 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -38,14 +38,6 @@
38#include <scsi/scsi_transport_iscsi.h> 38#include <scsi/scsi_transport_iscsi.h>
39#include <scsi/libiscsi.h> 39#include <scsi/libiscsi.h>
40 40
41struct iscsi_session *
42class_to_transport_session(struct iscsi_cls_session *cls_session)
43{
44 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
45 return iscsi_hostdata(shost->hostdata);
46}
47EXPORT_SYMBOL_GPL(class_to_transport_session);
48
49/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */ 41/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
50#define SNA32_CHECK 2147483648UL 42#define SNA32_CHECK 2147483648UL
51 43
@@ -87,68 +79,70 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
87 * xmit thread 79 * xmit thread
88 */ 80 */
89 if (!list_empty(&session->leadconn->xmitqueue) || 81 if (!list_empty(&session->leadconn->xmitqueue) ||
90 !list_empty(&session->leadconn->mgmtqueue)) 82 !list_empty(&session->leadconn->mgmtqueue)) {
91 scsi_queue_work(session->host, 83 if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
92 &session->leadconn->xmitwork); 84 scsi_queue_work(session->host,
85 &session->leadconn->xmitwork);
86 }
93 } 87 }
94} 88}
95EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); 89EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
96 90
97void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask, 91void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
98 struct iscsi_data *hdr) 92 struct iscsi_data *hdr)
99{ 93{
100 struct iscsi_conn *conn = ctask->conn; 94 struct iscsi_conn *conn = task->conn;
101 95
102 memset(hdr, 0, sizeof(struct iscsi_data)); 96 memset(hdr, 0, sizeof(struct iscsi_data));
103 hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); 97 hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
104 hdr->datasn = cpu_to_be32(ctask->unsol_datasn); 98 hdr->datasn = cpu_to_be32(task->unsol_datasn);
105 ctask->unsol_datasn++; 99 task->unsol_datasn++;
106 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; 100 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
107 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun)); 101 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
108 102
109 hdr->itt = ctask->hdr->itt; 103 hdr->itt = task->hdr->itt;
110 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 104 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
111 hdr->offset = cpu_to_be32(ctask->unsol_offset); 105 hdr->offset = cpu_to_be32(task->unsol_offset);
112 106
113 if (ctask->unsol_count > conn->max_xmit_dlength) { 107 if (task->unsol_count > conn->max_xmit_dlength) {
114 hton24(hdr->dlength, conn->max_xmit_dlength); 108 hton24(hdr->dlength, conn->max_xmit_dlength);
115 ctask->data_count = conn->max_xmit_dlength; 109 task->data_count = conn->max_xmit_dlength;
116 ctask->unsol_offset += ctask->data_count; 110 task->unsol_offset += task->data_count;
117 hdr->flags = 0; 111 hdr->flags = 0;
118 } else { 112 } else {
119 hton24(hdr->dlength, ctask->unsol_count); 113 hton24(hdr->dlength, task->unsol_count);
120 ctask->data_count = ctask->unsol_count; 114 task->data_count = task->unsol_count;
121 hdr->flags = ISCSI_FLAG_CMD_FINAL; 115 hdr->flags = ISCSI_FLAG_CMD_FINAL;
122 } 116 }
123} 117}
124EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu); 118EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
125 119
126static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len) 120static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
127{ 121{
128 unsigned exp_len = ctask->hdr_len + len; 122 unsigned exp_len = task->hdr_len + len;
129 123
130 if (exp_len > ctask->hdr_max) { 124 if (exp_len > task->hdr_max) {
131 WARN_ON(1); 125 WARN_ON(1);
132 return -EINVAL; 126 return -EINVAL;
133 } 127 }
134 128
135 WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */ 129 WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
136 ctask->hdr_len = exp_len; 130 task->hdr_len = exp_len;
137 return 0; 131 return 0;
138} 132}
139 133
140/* 134/*
141 * make an extended cdb AHS 135 * make an extended cdb AHS
142 */ 136 */
143static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask) 137static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
144{ 138{
145 struct scsi_cmnd *cmd = ctask->sc; 139 struct scsi_cmnd *cmd = task->sc;
146 unsigned rlen, pad_len; 140 unsigned rlen, pad_len;
147 unsigned short ahslength; 141 unsigned short ahslength;
148 struct iscsi_ecdb_ahdr *ecdb_ahdr; 142 struct iscsi_ecdb_ahdr *ecdb_ahdr;
149 int rc; 143 int rc;
150 144
151 ecdb_ahdr = iscsi_next_hdr(ctask); 145 ecdb_ahdr = iscsi_next_hdr(task);
152 rlen = cmd->cmd_len - ISCSI_CDB_SIZE; 146 rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
153 147
154 BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb)); 148 BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
@@ -156,7 +150,7 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
156 150
157 pad_len = iscsi_padding(rlen); 151 pad_len = iscsi_padding(rlen);
158 152
159 rc = iscsi_add_hdr(ctask, sizeof(ecdb_ahdr->ahslength) + 153 rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
160 sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len); 154 sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
161 if (rc) 155 if (rc)
162 return rc; 156 return rc;
@@ -171,19 +165,19 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
171 165
172 debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d " 166 debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
173 "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n", 167 "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
174 cmd->cmd_len, rlen, pad_len, ahslength, ctask->hdr_len); 168 cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
175 169
176 return 0; 170 return 0;
177} 171}
178 172
179static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask) 173static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
180{ 174{
181 struct scsi_cmnd *sc = ctask->sc; 175 struct scsi_cmnd *sc = task->sc;
182 struct iscsi_rlength_ahdr *rlen_ahdr; 176 struct iscsi_rlength_ahdr *rlen_ahdr;
183 int rc; 177 int rc;
184 178
185 rlen_ahdr = iscsi_next_hdr(ctask); 179 rlen_ahdr = iscsi_next_hdr(task);
186 rc = iscsi_add_hdr(ctask, sizeof(*rlen_ahdr)); 180 rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
187 if (rc) 181 if (rc)
188 return rc; 182 return rc;
189 183
@@ -203,28 +197,28 @@ static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask)
203 197
204/** 198/**
205 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu 199 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
206 * @ctask: iscsi cmd task 200 * @task: iscsi task
207 * 201 *
208 * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set 202 * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
209 * fields like dlength or final based on how much data it sends 203 * fields like dlength or final based on how much data it sends
210 */ 204 */
211static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) 205static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
212{ 206{
213 struct iscsi_conn *conn = ctask->conn; 207 struct iscsi_conn *conn = task->conn;
214 struct iscsi_session *session = conn->session; 208 struct iscsi_session *session = conn->session;
215 struct iscsi_cmd *hdr = ctask->hdr; 209 struct iscsi_cmd *hdr = task->hdr;
216 struct scsi_cmnd *sc = ctask->sc; 210 struct scsi_cmnd *sc = task->sc;
217 unsigned hdrlength, cmd_len; 211 unsigned hdrlength, cmd_len;
218 int rc; 212 int rc;
219 213
220 ctask->hdr_len = 0; 214 task->hdr_len = 0;
221 rc = iscsi_add_hdr(ctask, sizeof(*hdr)); 215 rc = iscsi_add_hdr(task, sizeof(*hdr));
222 if (rc) 216 if (rc)
223 return rc; 217 return rc;
224 hdr->opcode = ISCSI_OP_SCSI_CMD; 218 hdr->opcode = ISCSI_OP_SCSI_CMD;
225 hdr->flags = ISCSI_ATTR_SIMPLE; 219 hdr->flags = ISCSI_ATTR_SIMPLE;
226 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 220 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
227 hdr->itt = build_itt(ctask->itt, session->age); 221 hdr->itt = build_itt(task->itt, session->age);
228 hdr->cmdsn = cpu_to_be32(session->cmdsn); 222 hdr->cmdsn = cpu_to_be32(session->cmdsn);
229 session->cmdsn++; 223 session->cmdsn++;
230 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 224 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
@@ -232,17 +226,17 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
232 if (cmd_len < ISCSI_CDB_SIZE) 226 if (cmd_len < ISCSI_CDB_SIZE)
233 memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len); 227 memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
234 else if (cmd_len > ISCSI_CDB_SIZE) { 228 else if (cmd_len > ISCSI_CDB_SIZE) {
235 rc = iscsi_prep_ecdb_ahs(ctask); 229 rc = iscsi_prep_ecdb_ahs(task);
236 if (rc) 230 if (rc)
237 return rc; 231 return rc;
238 cmd_len = ISCSI_CDB_SIZE; 232 cmd_len = ISCSI_CDB_SIZE;
239 } 233 }
240 memcpy(hdr->cdb, sc->cmnd, cmd_len); 234 memcpy(hdr->cdb, sc->cmnd, cmd_len);
241 235
242 ctask->imm_count = 0; 236 task->imm_count = 0;
243 if (scsi_bidi_cmnd(sc)) { 237 if (scsi_bidi_cmnd(sc)) {
244 hdr->flags |= ISCSI_FLAG_CMD_READ; 238 hdr->flags |= ISCSI_FLAG_CMD_READ;
245 rc = iscsi_prep_bidi_ahs(ctask); 239 rc = iscsi_prep_bidi_ahs(task);
246 if (rc) 240 if (rc)
247 return rc; 241 return rc;
248 } 242 }
@@ -264,28 +258,28 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
264 * 258 *
265 * pad_count bytes to be sent as zero-padding 259 * pad_count bytes to be sent as zero-padding
266 */ 260 */
267 ctask->unsol_count = 0; 261 task->unsol_count = 0;
268 ctask->unsol_offset = 0; 262 task->unsol_offset = 0;
269 ctask->unsol_datasn = 0; 263 task->unsol_datasn = 0;
270 264
271 if (session->imm_data_en) { 265 if (session->imm_data_en) {
272 if (out_len >= session->first_burst) 266 if (out_len >= session->first_burst)
273 ctask->imm_count = min(session->first_burst, 267 task->imm_count = min(session->first_burst,
274 conn->max_xmit_dlength); 268 conn->max_xmit_dlength);
275 else 269 else
276 ctask->imm_count = min(out_len, 270 task->imm_count = min(out_len,
277 conn->max_xmit_dlength); 271 conn->max_xmit_dlength);
278 hton24(hdr->dlength, ctask->imm_count); 272 hton24(hdr->dlength, task->imm_count);
279 } else 273 } else
280 zero_data(hdr->dlength); 274 zero_data(hdr->dlength);
281 275
282 if (!session->initial_r2t_en) { 276 if (!session->initial_r2t_en) {
283 ctask->unsol_count = min(session->first_burst, out_len) 277 task->unsol_count = min(session->first_burst, out_len)
284 - ctask->imm_count; 278 - task->imm_count;
285 ctask->unsol_offset = ctask->imm_count; 279 task->unsol_offset = task->imm_count;
286 } 280 }
287 281
288 if (!ctask->unsol_count) 282 if (!task->unsol_count)
289 /* No unsolicit Data-Out's */ 283 /* No unsolicit Data-Out's */
290 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 284 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
291 } else { 285 } else {
@@ -298,7 +292,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
298 } 292 }
299 293
300 /* calculate size of additional header segments (AHSs) */ 294 /* calculate size of additional header segments (AHSs) */
301 hdrlength = ctask->hdr_len - sizeof(*hdr); 295 hdrlength = task->hdr_len - sizeof(*hdr);
302 296
303 WARN_ON(hdrlength & (ISCSI_PAD_LEN-1)); 297 WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
304 hdrlength /= ISCSI_PAD_LEN; 298 hdrlength /= ISCSI_PAD_LEN;
@@ -306,76 +300,115 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
306 WARN_ON(hdrlength >= 256); 300 WARN_ON(hdrlength >= 256);
307 hdr->hlength = hdrlength & 0xFF; 301 hdr->hlength = hdrlength & 0xFF;
308 302
309 if (conn->session->tt->init_cmd_task(conn->ctask)) 303 if (conn->session->tt->init_task &&
310 return EIO; 304 conn->session->tt->init_task(task))
305 return -EIO;
306
307 task->state = ISCSI_TASK_RUNNING;
308 list_move_tail(&task->running, &conn->run_list);
311 309
312 conn->scsicmd_pdus_cnt++; 310 conn->scsicmd_pdus_cnt++;
313 debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x " 311 debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
314 "len %d bidi_len %d cmdsn %d win %d]\n", 312 "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
315 scsi_bidi_cmnd(sc) ? "bidirectional" : 313 "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
316 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", 314 "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
317 conn->id, sc, sc->cmnd[0], ctask->itt, 315 scsi_bufflen(sc),
318 scsi_bufflen(sc), scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0, 316 scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
319 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); 317 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
320 return 0; 318 return 0;
321} 319}
322 320
323/** 321/**
324 * iscsi_complete_command - return command back to scsi-ml 322 * iscsi_complete_command - finish a task
325 * @ctask: iscsi cmd task 323 * @task: iscsi cmd task
326 * 324 *
327 * Must be called with session lock. 325 * Must be called with session lock.
328 * This function returns the scsi command to scsi-ml and returns 326 * This function returns the scsi command to scsi-ml or cleans
329 * the cmd task to the pool of available cmd tasks. 327 * up mgmt tasks then returns the task to the pool.
330 */ 328 */
331static void iscsi_complete_command(struct iscsi_cmd_task *ctask) 329static void iscsi_complete_command(struct iscsi_task *task)
332{ 330{
333 struct iscsi_conn *conn = ctask->conn; 331 struct iscsi_conn *conn = task->conn;
334 struct iscsi_session *session = conn->session; 332 struct iscsi_session *session = conn->session;
335 struct scsi_cmnd *sc = ctask->sc; 333 struct scsi_cmnd *sc = task->sc;
336 334
337 ctask->state = ISCSI_TASK_COMPLETED; 335 list_del_init(&task->running);
338 ctask->sc = NULL; 336 task->state = ISCSI_TASK_COMPLETED;
339 /* SCSI eh reuses commands to verify us */ 337 task->sc = NULL;
340 sc->SCp.ptr = NULL; 338
341 if (conn->ctask == ctask) 339 if (conn->task == task)
342 conn->ctask = NULL; 340 conn->task = NULL;
343 list_del_init(&ctask->running); 341 /*
344 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); 342 * login task is preallocated so do not free
345 sc->scsi_done(sc); 343 */
344 if (conn->login_task == task)
345 return;
346
347 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
348
349 if (conn->ping_task == task)
350 conn->ping_task = NULL;
351
352 if (sc) {
353 task->sc = NULL;
354 /* SCSI eh reuses commands to verify us */
355 sc->SCp.ptr = NULL;
356 /*
357 * queue command may call this to free the task, but
358 * not have setup the sc callback
359 */
360 if (sc->scsi_done)
361 sc->scsi_done(sc);
362 }
363}
364
365void __iscsi_get_task(struct iscsi_task *task)
366{
367 atomic_inc(&task->refcount);
346} 368}
369EXPORT_SYMBOL_GPL(__iscsi_get_task);
347 370
348static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask) 371static void __iscsi_put_task(struct iscsi_task *task)
349{ 372{
350 atomic_inc(&ctask->refcount); 373 if (atomic_dec_and_test(&task->refcount))
374 iscsi_complete_command(task);
351} 375}
352 376
353static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask) 377void iscsi_put_task(struct iscsi_task *task)
354{ 378{
355 if (atomic_dec_and_test(&ctask->refcount)) 379 struct iscsi_session *session = task->conn->session;
356 iscsi_complete_command(ctask); 380
381 spin_lock_bh(&session->lock);
382 __iscsi_put_task(task);
383 spin_unlock_bh(&session->lock);
357} 384}
385EXPORT_SYMBOL_GPL(iscsi_put_task);
358 386
359/* 387/*
360 * session lock must be held 388 * session lock must be held
361 */ 389 */
362static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 390static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
363 int err) 391 int err)
364{ 392{
365 struct scsi_cmnd *sc; 393 struct scsi_cmnd *sc;
366 394
367 sc = ctask->sc; 395 sc = task->sc;
368 if (!sc) 396 if (!sc)
369 return; 397 return;
370 398
371 if (ctask->state == ISCSI_TASK_PENDING) 399 if (task->state == ISCSI_TASK_PENDING)
372 /* 400 /*
373 * cmd never made it to the xmit thread, so we should not count 401 * cmd never made it to the xmit thread, so we should not count
374 * the cmd in the sequencing 402 * the cmd in the sequencing
375 */ 403 */
376 conn->session->queued_cmdsn--; 404 conn->session->queued_cmdsn--;
377 else 405 else
378 conn->session->tt->cleanup_cmd_task(conn, ctask); 406 conn->session->tt->cleanup_task(conn, task);
407 /*
408 * Check if cleanup_task dropped the lock and the command completed,
409 */
410 if (!task->sc)
411 return;
379 412
380 sc->result = err; 413 sc->result = err;
381 if (!scsi_bidi_cmnd(sc)) 414 if (!scsi_bidi_cmnd(sc))
@@ -384,39 +417,63 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
384 scsi_out(sc)->resid = scsi_out(sc)->length; 417 scsi_out(sc)->resid = scsi_out(sc)->length;
385 scsi_in(sc)->resid = scsi_in(sc)->length; 418 scsi_in(sc)->resid = scsi_in(sc)->length;
386 } 419 }
387 if (conn->ctask == ctask) 420
388 conn->ctask = NULL; 421 if (conn->task == task)
422 conn->task = NULL;
389 /* release ref from queuecommand */ 423 /* release ref from queuecommand */
390 __iscsi_put_ctask(ctask); 424 __iscsi_put_task(task);
391} 425}
392 426
393/** 427static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
394 * iscsi_free_mgmt_task - return mgmt task back to pool 428 struct iscsi_task *task)
395 * @conn: iscsi connection
396 * @mtask: mtask
397 *
398 * Must be called with session lock.
399 */
400void iscsi_free_mgmt_task(struct iscsi_conn *conn,
401 struct iscsi_mgmt_task *mtask)
402{ 429{
403 list_del_init(&mtask->running); 430 struct iscsi_session *session = conn->session;
404 if (conn->login_mtask == mtask) 431 struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
405 return; 432 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
433
434 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
435 return -ENOTCONN;
436
437 if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
438 hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
439 nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
440 /*
441 * pre-format CmdSN for outgoing PDU.
442 */
443 nop->cmdsn = cpu_to_be32(session->cmdsn);
444 if (hdr->itt != RESERVED_ITT) {
445 hdr->itt = build_itt(task->itt, session->age);
446 /*
447 * TODO: We always use immediate, so we never hit this.
448 * If we start to send tmfs or nops as non-immediate then
449 * we should start checking the cmdsn numbers for mgmt tasks.
450 */
451 if (conn->c_stage == ISCSI_CONN_STARTED &&
452 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
453 session->queued_cmdsn++;
454 session->cmdsn++;
455 }
456 }
406 457
407 if (conn->ping_mtask == mtask) 458 if (session->tt->init_task)
408 conn->ping_mtask = NULL; 459 session->tt->init_task(task);
409 __kfifo_put(conn->session->mgmtpool.queue, 460
410 (void*)&mtask, sizeof(void*)); 461 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
462 session->state = ISCSI_STATE_LOGGING_OUT;
463
464 list_move_tail(&task->running, &conn->mgmt_run_list);
465 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
466 hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
467 task->data_count);
468 return 0;
411} 469}
412EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
413 470
414static struct iscsi_mgmt_task * 471static struct iscsi_task *
415__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 472__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
416 char *data, uint32_t data_size) 473 char *data, uint32_t data_size)
417{ 474{
418 struct iscsi_session *session = conn->session; 475 struct iscsi_session *session = conn->session;
419 struct iscsi_mgmt_task *mtask; 476 struct iscsi_task *task;
420 477
421 if (session->state == ISCSI_STATE_TERMINATE) 478 if (session->state == ISCSI_STATE_TERMINATE)
422 return NULL; 479 return NULL;
@@ -426,29 +483,56 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
426 /* 483 /*
427 * Login and Text are sent serially, in 484 * Login and Text are sent serially, in
428 * request-followed-by-response sequence. 485 * request-followed-by-response sequence.
429 * Same mtask can be used. Same ITT must be used. 486 * Same task can be used. Same ITT must be used.
430 * Note that login_mtask is preallocated at conn_create(). 487 * Note that login_task is preallocated at conn_create().
431 */ 488 */
432 mtask = conn->login_mtask; 489 task = conn->login_task;
433 else { 490 else {
434 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); 491 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
435 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); 492 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
436 493
437 if (!__kfifo_get(session->mgmtpool.queue, 494 if (!__kfifo_get(session->cmdpool.queue,
438 (void*)&mtask, sizeof(void*))) 495 (void*)&task, sizeof(void*)))
439 return NULL; 496 return NULL;
497
498 if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
499 hdr->ttt == RESERVED_ITT) {
500 conn->ping_task = task;
501 conn->last_ping = jiffies;
502 }
440 } 503 }
504 /*
505 * released in complete pdu for task we expect a response for, and
506 * released by the lld when it has transmitted the task for
507 * pdus we do not expect a response for.
508 */
509 atomic_set(&task->refcount, 1);
510 task->conn = conn;
511 task->sc = NULL;
441 512
442 if (data_size) { 513 if (data_size) {
443 memcpy(mtask->data, data, data_size); 514 memcpy(task->data, data, data_size);
444 mtask->data_count = data_size; 515 task->data_count = data_size;
516 } else
517 task->data_count = 0;
518
519 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
520 INIT_LIST_HEAD(&task->running);
521 list_add_tail(&task->running, &conn->mgmtqueue);
522
523 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
524 if (iscsi_prep_mgmt_task(conn, task)) {
525 __iscsi_put_task(task);
526 return NULL;
527 }
528
529 if (session->tt->xmit_task(task))
530 task = NULL;
531
445 } else 532 } else
446 mtask->data_count = 0; 533 scsi_queue_work(conn->session->host, &conn->xmitwork);
447 534
448 memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr)); 535 return task;
449 INIT_LIST_HEAD(&mtask->running);
450 list_add_tail(&mtask->running, &conn->mgmtqueue);
451 return mtask;
452} 536}
453 537
454int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, 538int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -462,7 +546,6 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
462 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size)) 546 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
463 err = -EPERM; 547 err = -EPERM;
464 spin_unlock_bh(&session->lock); 548 spin_unlock_bh(&session->lock);
465 scsi_queue_work(session->host, &conn->xmitwork);
466 return err; 549 return err;
467} 550}
468EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu); 551EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
@@ -471,7 +554,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
471 * iscsi_cmd_rsp - SCSI Command Response processing 554 * iscsi_cmd_rsp - SCSI Command Response processing
472 * @conn: iscsi connection 555 * @conn: iscsi connection
473 * @hdr: iscsi header 556 * @hdr: iscsi header
474 * @ctask: scsi command task 557 * @task: scsi command task
475 * @data: cmd data buffer 558 * @data: cmd data buffer
476 * @datalen: len of buffer 559 * @datalen: len of buffer
477 * 560 *
@@ -479,12 +562,12 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
479 * then completes the command and task. 562 * then completes the command and task.
480 **/ 563 **/
481static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 564static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
482 struct iscsi_cmd_task *ctask, char *data, 565 struct iscsi_task *task, char *data,
483 int datalen) 566 int datalen)
484{ 567{
485 struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr; 568 struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
486 struct iscsi_session *session = conn->session; 569 struct iscsi_session *session = conn->session;
487 struct scsi_cmnd *sc = ctask->sc; 570 struct scsi_cmnd *sc = task->sc;
488 571
489 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); 572 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
490 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; 573 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
@@ -508,7 +591,7 @@ invalid_datalen:
508 goto out; 591 goto out;
509 } 592 }
510 593
511 senselen = be16_to_cpu(get_unaligned((__be16 *) data)); 594 senselen = get_unaligned_be16(data);
512 if (datalen < senselen) 595 if (datalen < senselen)
513 goto invalid_datalen; 596 goto invalid_datalen;
514 597
@@ -544,10 +627,10 @@ invalid_datalen:
544 } 627 }
545out: 628out:
546 debug_scsi("done [sc %lx res %d itt 0x%x]\n", 629 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
547 (long)sc, sc->result, ctask->itt); 630 (long)sc, sc->result, task->itt);
548 conn->scsirsp_pdus_cnt++; 631 conn->scsirsp_pdus_cnt++;
549 632
550 __iscsi_put_ctask(ctask); 633 __iscsi_put_task(task);
551} 634}
552 635
553static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) 636static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -572,9 +655,9 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
572static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) 655static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
573{ 656{
574 struct iscsi_nopout hdr; 657 struct iscsi_nopout hdr;
575 struct iscsi_mgmt_task *mtask; 658 struct iscsi_task *task;
576 659
577 if (!rhdr && conn->ping_mtask) 660 if (!rhdr && conn->ping_task)
578 return; 661 return;
579 662
580 memset(&hdr, 0, sizeof(struct iscsi_nopout)); 663 memset(&hdr, 0, sizeof(struct iscsi_nopout));
@@ -588,18 +671,9 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
588 } else 671 } else
589 hdr.ttt = RESERVED_ITT; 672 hdr.ttt = RESERVED_ITT;
590 673
591 mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); 674 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
592 if (!mtask) { 675 if (!task)
593 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); 676 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
594 return;
595 }
596
597 /* only track our nops */
598 if (!rhdr) {
599 conn->ping_mtask = mtask;
600 conn->last_ping = jiffies;
601 }
602 scsi_queue_work(conn->session->host, &conn->xmitwork);
603} 677}
604 678
605static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 679static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
@@ -628,6 +702,31 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
628} 702}
629 703
630/** 704/**
705 * iscsi_itt_to_task - look up task by itt
706 * @conn: iscsi connection
707 * @itt: itt
708 *
709 * This should be used for mgmt tasks like login and nops, or if
710 * the LDD's itt space does not include the session age.
711 *
712 * The session lock must be held.
713 */
714static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
715{
716 struct iscsi_session *session = conn->session;
717 uint32_t i;
718
719 if (itt == RESERVED_ITT)
720 return NULL;
721
722 i = get_itt(itt);
723 if (i >= session->cmds_max)
724 return NULL;
725
726 return session->cmds[i];
727}
728
729/**
631 * __iscsi_complete_pdu - complete pdu 730 * __iscsi_complete_pdu - complete pdu
632 * @conn: iscsi conn 731 * @conn: iscsi conn
633 * @hdr: iscsi header 732 * @hdr: iscsi header
@@ -638,108 +737,28 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
638 * queuecommand or send generic. session lock must be held and verify 737 * queuecommand or send generic. session lock must be held and verify
639 * itt must have been called. 738 * itt must have been called.
640 */ 739 */
641static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 740int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
642 char *data, int datalen) 741 char *data, int datalen)
643{ 742{
644 struct iscsi_session *session = conn->session; 743 struct iscsi_session *session = conn->session;
645 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0; 744 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
646 struct iscsi_cmd_task *ctask; 745 struct iscsi_task *task;
647 struct iscsi_mgmt_task *mtask;
648 uint32_t itt; 746 uint32_t itt;
649 747
650 conn->last_recv = jiffies; 748 conn->last_recv = jiffies;
749 rc = iscsi_verify_itt(conn, hdr->itt);
750 if (rc)
751 return rc;
752
651 if (hdr->itt != RESERVED_ITT) 753 if (hdr->itt != RESERVED_ITT)
652 itt = get_itt(hdr->itt); 754 itt = get_itt(hdr->itt);
653 else 755 else
654 itt = ~0U; 756 itt = ~0U;
655 757
656 if (itt < session->cmds_max) { 758 debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
657 ctask = session->cmds[itt]; 759 opcode, conn->id, itt, datalen);
658
659 debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
660 opcode, conn->id, ctask->itt, datalen);
661
662 switch(opcode) {
663 case ISCSI_OP_SCSI_CMD_RSP:
664 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
665 iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
666 datalen);
667 break;
668 case ISCSI_OP_SCSI_DATA_IN:
669 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
670 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
671 conn->scsirsp_pdus_cnt++;
672 __iscsi_put_ctask(ctask);
673 }
674 break;
675 case ISCSI_OP_R2T:
676 /* LLD handles this for now */
677 break;
678 default:
679 rc = ISCSI_ERR_BAD_OPCODE;
680 break;
681 }
682 } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
683 itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
684 mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
685
686 debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
687 opcode, conn->id, mtask->itt, datalen);
688 760
689 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 761 if (itt == ~0U) {
690 switch(opcode) {
691 case ISCSI_OP_LOGOUT_RSP:
692 if (datalen) {
693 rc = ISCSI_ERR_PROTO;
694 break;
695 }
696 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
697 /* fall through */
698 case ISCSI_OP_LOGIN_RSP:
699 case ISCSI_OP_TEXT_RSP:
700 /*
701 * login related PDU's exp_statsn is handled in
702 * userspace
703 */
704 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
705 rc = ISCSI_ERR_CONN_FAILED;
706 iscsi_free_mgmt_task(conn, mtask);
707 break;
708 case ISCSI_OP_SCSI_TMFUNC_RSP:
709 if (datalen) {
710 rc = ISCSI_ERR_PROTO;
711 break;
712 }
713
714 iscsi_tmf_rsp(conn, hdr);
715 iscsi_free_mgmt_task(conn, mtask);
716 break;
717 case ISCSI_OP_NOOP_IN:
718 if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
719 datalen) {
720 rc = ISCSI_ERR_PROTO;
721 break;
722 }
723 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
724
725 if (conn->ping_mtask != mtask) {
726 /*
727 * If this is not in response to one of our
728 * nops then it must be from userspace.
729 */
730 if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
731 datalen))
732 rc = ISCSI_ERR_CONN_FAILED;
733 } else
734 mod_timer(&conn->transport_timer,
735 jiffies + conn->recv_timeout);
736 iscsi_free_mgmt_task(conn, mtask);
737 break;
738 default:
739 rc = ISCSI_ERR_BAD_OPCODE;
740 break;
741 }
742 } else if (itt == ~0U) {
743 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 762 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
744 763
745 switch(opcode) { 764 switch(opcode) {
@@ -766,11 +785,104 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
766 rc = ISCSI_ERR_BAD_OPCODE; 785 rc = ISCSI_ERR_BAD_OPCODE;
767 break; 786 break;
768 } 787 }
769 } else 788 goto out;
770 rc = ISCSI_ERR_BAD_ITT; 789 }
771 790
791 switch(opcode) {
792 case ISCSI_OP_SCSI_CMD_RSP:
793 case ISCSI_OP_SCSI_DATA_IN:
794 task = iscsi_itt_to_ctask(conn, hdr->itt);
795 if (!task)
796 return ISCSI_ERR_BAD_ITT;
797 break;
798 case ISCSI_OP_R2T:
799 /*
800 * LLD handles R2Ts if they need to.
801 */
802 return 0;
803 case ISCSI_OP_LOGOUT_RSP:
804 case ISCSI_OP_LOGIN_RSP:
805 case ISCSI_OP_TEXT_RSP:
806 case ISCSI_OP_SCSI_TMFUNC_RSP:
807 case ISCSI_OP_NOOP_IN:
808 task = iscsi_itt_to_task(conn, hdr->itt);
809 if (!task)
810 return ISCSI_ERR_BAD_ITT;
811 break;
812 default:
813 return ISCSI_ERR_BAD_OPCODE;
814 }
815
816 switch(opcode) {
817 case ISCSI_OP_SCSI_CMD_RSP:
818 iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
819 break;
820 case ISCSI_OP_SCSI_DATA_IN:
821 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
822 conn->scsirsp_pdus_cnt++;
823 iscsi_update_cmdsn(session,
824 (struct iscsi_nopin*) hdr);
825 __iscsi_put_task(task);
826 }
827 break;
828 case ISCSI_OP_LOGOUT_RSP:
829 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
830 if (datalen) {
831 rc = ISCSI_ERR_PROTO;
832 break;
833 }
834 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
835 goto recv_pdu;
836 case ISCSI_OP_LOGIN_RSP:
837 case ISCSI_OP_TEXT_RSP:
838 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
839 /*
840 * login related PDU's exp_statsn is handled in
841 * userspace
842 */
843 goto recv_pdu;
844 case ISCSI_OP_SCSI_TMFUNC_RSP:
845 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
846 if (datalen) {
847 rc = ISCSI_ERR_PROTO;
848 break;
849 }
850
851 iscsi_tmf_rsp(conn, hdr);
852 __iscsi_put_task(task);
853 break;
854 case ISCSI_OP_NOOP_IN:
855 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
856 if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
857 rc = ISCSI_ERR_PROTO;
858 break;
859 }
860 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
861
862 if (conn->ping_task != task)
863 /*
864 * If this is not in response to one of our
865 * nops then it must be from userspace.
866 */
867 goto recv_pdu;
868
869 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
870 __iscsi_put_task(task);
871 break;
872 default:
873 rc = ISCSI_ERR_BAD_OPCODE;
874 break;
875 }
876
877out:
878 return rc;
879recv_pdu:
880 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
881 rc = ISCSI_ERR_CONN_FAILED;
882 __iscsi_put_task(task);
772 return rc; 883 return rc;
773} 884}
885EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
774 886
775int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 887int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
776 char *data, int datalen) 888 char *data, int datalen)
@@ -784,51 +896,63 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
784} 896}
785EXPORT_SYMBOL_GPL(iscsi_complete_pdu); 897EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
786 898
787/* verify itt (itt encoding: age+cid+itt) */ 899int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
788int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
789 uint32_t *ret_itt)
790{ 900{
791 struct iscsi_session *session = conn->session; 901 struct iscsi_session *session = conn->session;
792 struct iscsi_cmd_task *ctask; 902 uint32_t i;
793 uint32_t itt;
794 903
795 if (hdr->itt != RESERVED_ITT) { 904 if (itt == RESERVED_ITT)
796 if (((__force u32)hdr->itt & ISCSI_AGE_MASK) != 905 return 0;
797 (session->age << ISCSI_AGE_SHIFT)) {
798 iscsi_conn_printk(KERN_ERR, conn,
799 "received itt %x expected session "
800 "age (%x)\n", (__force u32)hdr->itt,
801 session->age & ISCSI_AGE_MASK);
802 return ISCSI_ERR_BAD_ITT;
803 }
804 906
805 itt = get_itt(hdr->itt); 907 if (((__force u32)itt & ISCSI_AGE_MASK) !=
806 } else 908 (session->age << ISCSI_AGE_SHIFT)) {
807 itt = ~0U; 909 iscsi_conn_printk(KERN_ERR, conn,
910 "received itt %x expected session age (%x)\n",
911 (__force u32)itt, session->age);
912 return ISCSI_ERR_BAD_ITT;
913 }
808 914
809 if (itt < session->cmds_max) { 915 i = get_itt(itt);
810 ctask = session->cmds[itt]; 916 if (i >= session->cmds_max) {
917 iscsi_conn_printk(KERN_ERR, conn,
918 "received invalid itt index %u (max cmds "
919 "%u.\n", i, session->cmds_max);
920 return ISCSI_ERR_BAD_ITT;
921 }
922 return 0;
923}
924EXPORT_SYMBOL_GPL(iscsi_verify_itt);
811 925
812 if (!ctask->sc) { 926/**
813 iscsi_conn_printk(KERN_INFO, conn, "dropping ctask " 927 * iscsi_itt_to_ctask - look up ctask by itt
814 "with itt 0x%x\n", ctask->itt); 928 * @conn: iscsi connection
815 /* force drop */ 929 * @itt: itt
816 return ISCSI_ERR_NO_SCSI_CMD; 930 *
817 } 931 * This should be used for cmd tasks.
932 *
933 * The session lock must be held.
934 */
935struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
936{
937 struct iscsi_task *task;
818 938
819 if (ctask->sc->SCp.phase != session->age) { 939 if (iscsi_verify_itt(conn, itt))
820 iscsi_conn_printk(KERN_ERR, conn, 940 return NULL;
821 "iscsi: ctask's session age %d, " 941
822 "expected %d\n", ctask->sc->SCp.phase, 942 task = iscsi_itt_to_task(conn, itt);
823 session->age); 943 if (!task || !task->sc)
824 return ISCSI_ERR_SESSION_FAILED; 944 return NULL;
825 } 945
946 if (task->sc->SCp.phase != conn->session->age) {
947 iscsi_session_printk(KERN_ERR, conn->session,
948 "task's session age %d, expected %d\n",
949 task->sc->SCp.phase, conn->session->age);
950 return NULL;
826 } 951 }
827 952
828 *ret_itt = itt; 953 return task;
829 return 0;
830} 954}
831EXPORT_SYMBOL_GPL(iscsi_verify_itt); 955EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
832 956
833void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) 957void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
834{ 958{
@@ -850,61 +974,6 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
850} 974}
851EXPORT_SYMBOL_GPL(iscsi_conn_failure); 975EXPORT_SYMBOL_GPL(iscsi_conn_failure);
852 976
853static void iscsi_prep_mtask(struct iscsi_conn *conn,
854 struct iscsi_mgmt_task *mtask)
855{
856 struct iscsi_session *session = conn->session;
857 struct iscsi_hdr *hdr = mtask->hdr;
858 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
859
860 if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
861 hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
862 nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
863 /*
864 * pre-format CmdSN for outgoing PDU.
865 */
866 nop->cmdsn = cpu_to_be32(session->cmdsn);
867 if (hdr->itt != RESERVED_ITT) {
868 hdr->itt = build_itt(mtask->itt, session->age);
869 /*
870 * TODO: We always use immediate, so we never hit this.
871 * If we start to send tmfs or nops as non-immediate then
872 * we should start checking the cmdsn numbers for mgmt tasks.
873 */
874 if (conn->c_stage == ISCSI_CONN_STARTED &&
875 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
876 session->queued_cmdsn++;
877 session->cmdsn++;
878 }
879 }
880
881 if (session->tt->init_mgmt_task)
882 session->tt->init_mgmt_task(conn, mtask);
883
884 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
885 hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
886 mtask->data_count);
887}
888
889static int iscsi_xmit_mtask(struct iscsi_conn *conn)
890{
891 struct iscsi_hdr *hdr = conn->mtask->hdr;
892 int rc;
893
894 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
895 conn->session->state = ISCSI_STATE_LOGGING_OUT;
896 spin_unlock_bh(&conn->session->lock);
897
898 rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
899 spin_lock_bh(&conn->session->lock);
900 if (rc)
901 return rc;
902
903 /* done with this in-progress mtask */
904 conn->mtask = NULL;
905 return 0;
906}
907
908static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn) 977static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
909{ 978{
910 struct iscsi_session *session = conn->session; 979 struct iscsi_session *session = conn->session;
@@ -922,37 +991,38 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
922 return 0; 991 return 0;
923} 992}
924 993
925static int iscsi_xmit_ctask(struct iscsi_conn *conn) 994static int iscsi_xmit_task(struct iscsi_conn *conn)
926{ 995{
927 struct iscsi_cmd_task *ctask = conn->ctask; 996 struct iscsi_task *task = conn->task;
928 int rc; 997 int rc;
929 998
930 __iscsi_get_ctask(ctask); 999 __iscsi_get_task(task);
931 spin_unlock_bh(&conn->session->lock); 1000 spin_unlock_bh(&conn->session->lock);
932 rc = conn->session->tt->xmit_cmd_task(conn, ctask); 1001 rc = conn->session->tt->xmit_task(task);
933 spin_lock_bh(&conn->session->lock); 1002 spin_lock_bh(&conn->session->lock);
934 __iscsi_put_ctask(ctask); 1003 __iscsi_put_task(task);
935 if (!rc) 1004 if (!rc)
936 /* done with this ctask */ 1005 /* done with this task */
937 conn->ctask = NULL; 1006 conn->task = NULL;
938 return rc; 1007 return rc;
939} 1008}
940 1009
941/** 1010/**
942 * iscsi_requeue_ctask - requeue ctask to run from session workqueue 1011 * iscsi_requeue_task - requeue task to run from session workqueue
943 * @ctask: ctask to requeue 1012 * @task: task to requeue
944 * 1013 *
945 * LLDs that need to run a ctask from the session workqueue should call 1014 * LLDs that need to run a task from the session workqueue should call
946 * this. The session lock must be held. 1015 * this. The session lock must be held. This should only be called
1016 * by software drivers.
947 */ 1017 */
948void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask) 1018void iscsi_requeue_task(struct iscsi_task *task)
949{ 1019{
950 struct iscsi_conn *conn = ctask->conn; 1020 struct iscsi_conn *conn = task->conn;
951 1021
952 list_move_tail(&ctask->running, &conn->requeue); 1022 list_move_tail(&task->running, &conn->requeue);
953 scsi_queue_work(conn->session->host, &conn->xmitwork); 1023 scsi_queue_work(conn->session->host, &conn->xmitwork);
954} 1024}
955EXPORT_SYMBOL_GPL(iscsi_requeue_ctask); 1025EXPORT_SYMBOL_GPL(iscsi_requeue_task);
956 1026
957/** 1027/**
958 * iscsi_data_xmit - xmit any command into the scheduled connection 1028 * iscsi_data_xmit - xmit any command into the scheduled connection
@@ -974,14 +1044,8 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
974 return -ENODATA; 1044 return -ENODATA;
975 } 1045 }
976 1046
977 if (conn->ctask) { 1047 if (conn->task) {
978 rc = iscsi_xmit_ctask(conn); 1048 rc = iscsi_xmit_task(conn);
979 if (rc)
980 goto again;
981 }
982
983 if (conn->mtask) {
984 rc = iscsi_xmit_mtask(conn);
985 if (rc) 1049 if (rc)
986 goto again; 1050 goto again;
987 } 1051 }
@@ -993,17 +1057,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
993 */ 1057 */
994check_mgmt: 1058check_mgmt:
995 while (!list_empty(&conn->mgmtqueue)) { 1059 while (!list_empty(&conn->mgmtqueue)) {
996 conn->mtask = list_entry(conn->mgmtqueue.next, 1060 conn->task = list_entry(conn->mgmtqueue.next,
997 struct iscsi_mgmt_task, running); 1061 struct iscsi_task, running);
998 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { 1062 if (iscsi_prep_mgmt_task(conn, conn->task)) {
999 iscsi_free_mgmt_task(conn, conn->mtask); 1063 __iscsi_put_task(conn->task);
1000 conn->mtask = NULL; 1064 conn->task = NULL;
1001 continue; 1065 continue;
1002 } 1066 }
1003 1067 rc = iscsi_xmit_task(conn);
1004 iscsi_prep_mtask(conn, conn->mtask);
1005 list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
1006 rc = iscsi_xmit_mtask(conn);
1007 if (rc) 1068 if (rc)
1008 goto again; 1069 goto again;
1009 } 1070 }
@@ -1013,24 +1074,21 @@ check_mgmt:
1013 if (conn->tmf_state == TMF_QUEUED) 1074 if (conn->tmf_state == TMF_QUEUED)
1014 break; 1075 break;
1015 1076
1016 conn->ctask = list_entry(conn->xmitqueue.next, 1077 conn->task = list_entry(conn->xmitqueue.next,
1017 struct iscsi_cmd_task, running); 1078 struct iscsi_task, running);
1018 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { 1079 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1019 fail_command(conn, conn->ctask, DID_IMM_RETRY << 16); 1080 fail_command(conn, conn->task, DID_IMM_RETRY << 16);
1020 continue; 1081 continue;
1021 } 1082 }
1022 if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) { 1083 if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
1023 fail_command(conn, conn->ctask, DID_ABORT << 16); 1084 fail_command(conn, conn->task, DID_ABORT << 16);
1024 continue; 1085 continue;
1025 } 1086 }
1026 1087 rc = iscsi_xmit_task(conn);
1027 conn->ctask->state = ISCSI_TASK_RUNNING;
1028 list_move_tail(conn->xmitqueue.next, &conn->run_list);
1029 rc = iscsi_xmit_ctask(conn);
1030 if (rc) 1088 if (rc)
1031 goto again; 1089 goto again;
1032 /* 1090 /*
1033 * we could continuously get new ctask requests so 1091 * we could continuously get new task requests so
1034 * we need to check the mgmt queue for nops that need to 1092 * we need to check the mgmt queue for nops that need to
1035 * be sent to aviod starvation 1093 * be sent to aviod starvation
1036 */ 1094 */
@@ -1048,11 +1106,11 @@ check_mgmt:
1048 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) 1106 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
1049 break; 1107 break;
1050 1108
1051 conn->ctask = list_entry(conn->requeue.next, 1109 conn->task = list_entry(conn->requeue.next,
1052 struct iscsi_cmd_task, running); 1110 struct iscsi_task, running);
1053 conn->ctask->state = ISCSI_TASK_RUNNING; 1111 conn->task->state = ISCSI_TASK_RUNNING;
1054 list_move_tail(conn->requeue.next, &conn->run_list); 1112 list_move_tail(conn->requeue.next, &conn->run_list);
1055 rc = iscsi_xmit_ctask(conn); 1113 rc = iscsi_xmit_task(conn);
1056 if (rc) 1114 if (rc)
1057 goto again; 1115 goto again;
1058 if (!list_empty(&conn->mgmtqueue)) 1116 if (!list_empty(&conn->mgmtqueue))
@@ -1096,11 +1154,12 @@ enum {
1096 1154
1097int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) 1155int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1098{ 1156{
1157 struct iscsi_cls_session *cls_session;
1099 struct Scsi_Host *host; 1158 struct Scsi_Host *host;
1100 int reason = 0; 1159 int reason = 0;
1101 struct iscsi_session *session; 1160 struct iscsi_session *session;
1102 struct iscsi_conn *conn; 1161 struct iscsi_conn *conn;
1103 struct iscsi_cmd_task *ctask = NULL; 1162 struct iscsi_task *task = NULL;
1104 1163
1105 sc->scsi_done = done; 1164 sc->scsi_done = done;
1106 sc->result = 0; 1165 sc->result = 0;
@@ -1109,10 +1168,11 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1109 host = sc->device->host; 1168 host = sc->device->host;
1110 spin_unlock(host->host_lock); 1169 spin_unlock(host->host_lock);
1111 1170
1112 session = iscsi_hostdata(host->hostdata); 1171 cls_session = starget_to_session(scsi_target(sc->device));
1172 session = cls_session->dd_data;
1113 spin_lock(&session->lock); 1173 spin_lock(&session->lock);
1114 1174
1115 reason = iscsi_session_chkready(session_to_cls(session)); 1175 reason = iscsi_session_chkready(cls_session);
1116 if (reason) { 1176 if (reason) {
1117 sc->result = reason; 1177 sc->result = reason;
1118 goto fault; 1178 goto fault;
@@ -1167,26 +1227,39 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1167 goto reject; 1227 goto reject;
1168 } 1228 }
1169 1229
1170 if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask, 1230 if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
1171 sizeof(void*))) { 1231 sizeof(void*))) {
1172 reason = FAILURE_OOM; 1232 reason = FAILURE_OOM;
1173 goto reject; 1233 goto reject;
1174 } 1234 }
1175 session->queued_cmdsn++;
1176
1177 sc->SCp.phase = session->age; 1235 sc->SCp.phase = session->age;
1178 sc->SCp.ptr = (char *)ctask; 1236 sc->SCp.ptr = (char *)task;
1179 1237
1180 atomic_set(&ctask->refcount, 1); 1238 atomic_set(&task->refcount, 1);
1181 ctask->state = ISCSI_TASK_PENDING; 1239 task->state = ISCSI_TASK_PENDING;
1182 ctask->conn = conn; 1240 task->conn = conn;
1183 ctask->sc = sc; 1241 task->sc = sc;
1184 INIT_LIST_HEAD(&ctask->running); 1242 INIT_LIST_HEAD(&task->running);
1243 list_add_tail(&task->running, &conn->xmitqueue);
1244
1245 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
1246 if (iscsi_prep_scsi_cmd_pdu(task)) {
1247 sc->result = DID_ABORT << 16;
1248 sc->scsi_done = NULL;
1249 iscsi_complete_command(task);
1250 goto fault;
1251 }
1252 if (session->tt->xmit_task(task)) {
1253 sc->scsi_done = NULL;
1254 iscsi_complete_command(task);
1255 reason = FAILURE_SESSION_NOT_READY;
1256 goto reject;
1257 }
1258 } else
1259 scsi_queue_work(session->host, &conn->xmitwork);
1185 1260
1186 list_add_tail(&ctask->running, &conn->xmitqueue); 1261 session->queued_cmdsn++;
1187 spin_unlock(&session->lock); 1262 spin_unlock(&session->lock);
1188
1189 scsi_queue_work(host, &conn->xmitwork);
1190 spin_lock(host->host_lock); 1263 spin_lock(host->host_lock);
1191 return 0; 1264 return 0;
1192 1265
@@ -1205,7 +1278,7 @@ fault:
1205 scsi_out(sc)->resid = scsi_out(sc)->length; 1278 scsi_out(sc)->resid = scsi_out(sc)->length;
1206 scsi_in(sc)->resid = scsi_in(sc)->length; 1279 scsi_in(sc)->resid = scsi_in(sc)->length;
1207 } 1280 }
1208 sc->scsi_done(sc); 1281 done(sc);
1209 spin_lock(host->host_lock); 1282 spin_lock(host->host_lock);
1210 return 0; 1283 return 0;
1211} 1284}
@@ -1222,7 +1295,7 @@ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
1222 1295
1223void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session) 1296void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
1224{ 1297{
1225 struct iscsi_session *session = class_to_transport_session(cls_session); 1298 struct iscsi_session *session = cls_session->dd_data;
1226 1299
1227 spin_lock_bh(&session->lock); 1300 spin_lock_bh(&session->lock);
1228 if (session->state != ISCSI_STATE_LOGGED_IN) { 1301 if (session->state != ISCSI_STATE_LOGGED_IN) {
@@ -1236,9 +1309,13 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
1236 1309
1237int iscsi_eh_host_reset(struct scsi_cmnd *sc) 1310int iscsi_eh_host_reset(struct scsi_cmnd *sc)
1238{ 1311{
1239 struct Scsi_Host *host = sc->device->host; 1312 struct iscsi_cls_session *cls_session;
1240 struct iscsi_session *session = iscsi_hostdata(host->hostdata); 1313 struct iscsi_session *session;
1241 struct iscsi_conn *conn = session->leadconn; 1314 struct iscsi_conn *conn;
1315
1316 cls_session = starget_to_session(scsi_target(sc->device));
1317 session = cls_session->dd_data;
1318 conn = session->leadconn;
1242 1319
1243 mutex_lock(&session->eh_mutex); 1320 mutex_lock(&session->eh_mutex);
1244 spin_lock_bh(&session->lock); 1321 spin_lock_bh(&session->lock);
@@ -1300,11 +1377,11 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1300 int timeout) 1377 int timeout)
1301{ 1378{
1302 struct iscsi_session *session = conn->session; 1379 struct iscsi_session *session = conn->session;
1303 struct iscsi_mgmt_task *mtask; 1380 struct iscsi_task *task;
1304 1381
1305 mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, 1382 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
1306 NULL, 0); 1383 NULL, 0);
1307 if (!mtask) { 1384 if (!task) {
1308 spin_unlock_bh(&session->lock); 1385 spin_unlock_bh(&session->lock);
1309 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1386 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1310 spin_lock_bh(&session->lock); 1387 spin_lock_bh(&session->lock);
@@ -1320,7 +1397,6 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1320 1397
1321 spin_unlock_bh(&session->lock); 1398 spin_unlock_bh(&session->lock);
1322 mutex_unlock(&session->eh_mutex); 1399 mutex_unlock(&session->eh_mutex);
1323 scsi_queue_work(session->host, &conn->xmitwork);
1324 1400
1325 /* 1401 /*
1326 * block eh thread until: 1402 * block eh thread until:
@@ -1339,7 +1415,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1339 1415
1340 mutex_lock(&session->eh_mutex); 1416 mutex_lock(&session->eh_mutex);
1341 spin_lock_bh(&session->lock); 1417 spin_lock_bh(&session->lock);
1342 /* if the session drops it will clean up the mtask */ 1418 /* if the session drops it will clean up the task */
1343 if (age != session->age || 1419 if (age != session->age ||
1344 session->state != ISCSI_STATE_LOGGED_IN) 1420 session->state != ISCSI_STATE_LOGGED_IN)
1345 return -ENOTCONN; 1421 return -ENOTCONN;
@@ -1353,48 +1429,51 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1353static void fail_all_commands(struct iscsi_conn *conn, unsigned lun, 1429static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
1354 int error) 1430 int error)
1355{ 1431{
1356 struct iscsi_cmd_task *ctask, *tmp; 1432 struct iscsi_task *task, *tmp;
1357 1433
1358 if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1)) 1434 if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
1359 conn->ctask = NULL; 1435 conn->task = NULL;
1360 1436
1361 /* flush pending */ 1437 /* flush pending */
1362 list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) { 1438 list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
1363 if (lun == ctask->sc->device->lun || lun == -1) { 1439 if (lun == task->sc->device->lun || lun == -1) {
1364 debug_scsi("failing pending sc %p itt 0x%x\n", 1440 debug_scsi("failing pending sc %p itt 0x%x\n",
1365 ctask->sc, ctask->itt); 1441 task->sc, task->itt);
1366 fail_command(conn, ctask, error << 16); 1442 fail_command(conn, task, error << 16);
1367 } 1443 }
1368 } 1444 }
1369 1445
1370 list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) { 1446 list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
1371 if (lun == ctask->sc->device->lun || lun == -1) { 1447 if (lun == task->sc->device->lun || lun == -1) {
1372 debug_scsi("failing requeued sc %p itt 0x%x\n", 1448 debug_scsi("failing requeued sc %p itt 0x%x\n",
1373 ctask->sc, ctask->itt); 1449 task->sc, task->itt);
1374 fail_command(conn, ctask, error << 16); 1450 fail_command(conn, task, error << 16);
1375 } 1451 }
1376 } 1452 }
1377 1453
1378 /* fail all other running */ 1454 /* fail all other running */
1379 list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) { 1455 list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
1380 if (lun == ctask->sc->device->lun || lun == -1) { 1456 if (lun == task->sc->device->lun || lun == -1) {
1381 debug_scsi("failing in progress sc %p itt 0x%x\n", 1457 debug_scsi("failing in progress sc %p itt 0x%x\n",
1382 ctask->sc, ctask->itt); 1458 task->sc, task->itt);
1383 fail_command(conn, ctask, DID_BUS_BUSY << 16); 1459 fail_command(conn, task, DID_BUS_BUSY << 16);
1384 } 1460 }
1385 } 1461 }
1386} 1462}
1387 1463
1388static void iscsi_suspend_tx(struct iscsi_conn *conn) 1464void iscsi_suspend_tx(struct iscsi_conn *conn)
1389{ 1465{
1390 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1466 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1391 scsi_flush_work(conn->session->host); 1467 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
1468 scsi_flush_work(conn->session->host);
1392} 1469}
1470EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
1393 1471
1394static void iscsi_start_tx(struct iscsi_conn *conn) 1472static void iscsi_start_tx(struct iscsi_conn *conn)
1395{ 1473{
1396 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1474 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1397 scsi_queue_work(conn->session->host, &conn->xmitwork); 1475 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
1476 scsi_queue_work(conn->session->host, &conn->xmitwork);
1398} 1477}
1399 1478
1400static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) 1479static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
@@ -1405,7 +1484,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1405 enum scsi_eh_timer_return rc = EH_NOT_HANDLED; 1484 enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
1406 1485
1407 cls_session = starget_to_session(scsi_target(scmd->device)); 1486 cls_session = starget_to_session(scsi_target(scmd->device));
1408 session = class_to_transport_session(cls_session); 1487 session = cls_session->dd_data;
1409 1488
1410 debug_scsi("scsi cmd %p timedout\n", scmd); 1489 debug_scsi("scsi cmd %p timedout\n", scmd);
1411 1490
@@ -1443,7 +1522,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1443 jiffies)) 1522 jiffies))
1444 rc = EH_RESET_TIMER; 1523 rc = EH_RESET_TIMER;
1445 /* if in the middle of checking the transport then give us more time */ 1524 /* if in the middle of checking the transport then give us more time */
1446 if (conn->ping_mtask) 1525 if (conn->ping_task)
1447 rc = EH_RESET_TIMER; 1526 rc = EH_RESET_TIMER;
1448done: 1527done:
1449 spin_unlock(&session->lock); 1528 spin_unlock(&session->lock);
@@ -1467,7 +1546,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
1467 1546
1468 recv_timeout *= HZ; 1547 recv_timeout *= HZ;
1469 last_recv = conn->last_recv; 1548 last_recv = conn->last_recv;
1470 if (conn->ping_mtask && 1549 if (conn->ping_task &&
1471 time_before_eq(conn->last_ping + (conn->ping_timeout * HZ), 1550 time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
1472 jiffies)) { 1551 jiffies)) {
1473 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs " 1552 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
@@ -1493,27 +1572,30 @@ done:
1493 spin_unlock(&session->lock); 1572 spin_unlock(&session->lock);
1494} 1573}
1495 1574
1496static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask, 1575static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
1497 struct iscsi_tm *hdr) 1576 struct iscsi_tm *hdr)
1498{ 1577{
1499 memset(hdr, 0, sizeof(*hdr)); 1578 memset(hdr, 0, sizeof(*hdr));
1500 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; 1579 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
1501 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK; 1580 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
1502 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 1581 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
1503 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun)); 1582 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
1504 hdr->rtt = ctask->hdr->itt; 1583 hdr->rtt = task->hdr->itt;
1505 hdr->refcmdsn = ctask->hdr->cmdsn; 1584 hdr->refcmdsn = task->hdr->cmdsn;
1506} 1585}
1507 1586
1508int iscsi_eh_abort(struct scsi_cmnd *sc) 1587int iscsi_eh_abort(struct scsi_cmnd *sc)
1509{ 1588{
1510 struct Scsi_Host *host = sc->device->host; 1589 struct iscsi_cls_session *cls_session;
1511 struct iscsi_session *session = iscsi_hostdata(host->hostdata); 1590 struct iscsi_session *session;
1512 struct iscsi_conn *conn; 1591 struct iscsi_conn *conn;
1513 struct iscsi_cmd_task *ctask; 1592 struct iscsi_task *task;
1514 struct iscsi_tm *hdr; 1593 struct iscsi_tm *hdr;
1515 int rc, age; 1594 int rc, age;
1516 1595
1596 cls_session = starget_to_session(scsi_target(sc->device));
1597 session = cls_session->dd_data;
1598
1517 mutex_lock(&session->eh_mutex); 1599 mutex_lock(&session->eh_mutex);
1518 spin_lock_bh(&session->lock); 1600 spin_lock_bh(&session->lock);
1519 /* 1601 /*
@@ -1542,17 +1624,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1542 conn->eh_abort_cnt++; 1624 conn->eh_abort_cnt++;
1543 age = session->age; 1625 age = session->age;
1544 1626
1545 ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; 1627 task = (struct iscsi_task *)sc->SCp.ptr;
1546 debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt); 1628 debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
1547 1629
1548 /* ctask completed before time out */ 1630 /* task completed before time out */
1549 if (!ctask->sc) { 1631 if (!task->sc) {
1550 debug_scsi("sc completed while abort in progress\n"); 1632 debug_scsi("sc completed while abort in progress\n");
1551 goto success; 1633 goto success;
1552 } 1634 }
1553 1635
1554 if (ctask->state == ISCSI_TASK_PENDING) { 1636 if (task->state == ISCSI_TASK_PENDING) {
1555 fail_command(conn, ctask, DID_ABORT << 16); 1637 fail_command(conn, task, DID_ABORT << 16);
1556 goto success; 1638 goto success;
1557 } 1639 }
1558 1640
@@ -1562,7 +1644,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1562 conn->tmf_state = TMF_QUEUED; 1644 conn->tmf_state = TMF_QUEUED;
1563 1645
1564 hdr = &conn->tmhdr; 1646 hdr = &conn->tmhdr;
1565 iscsi_prep_abort_task_pdu(ctask, hdr); 1647 iscsi_prep_abort_task_pdu(task, hdr);
1566 1648
1567 if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) { 1649 if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
1568 rc = FAILED; 1650 rc = FAILED;
@@ -1572,16 +1654,20 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1572 switch (conn->tmf_state) { 1654 switch (conn->tmf_state) {
1573 case TMF_SUCCESS: 1655 case TMF_SUCCESS:
1574 spin_unlock_bh(&session->lock); 1656 spin_unlock_bh(&session->lock);
1657 /*
1658 * stop tx side incase the target had sent a abort rsp but
1659 * the initiator was still writing out data.
1660 */
1575 iscsi_suspend_tx(conn); 1661 iscsi_suspend_tx(conn);
1576 /* 1662 /*
1577 * clean up task if aborted. grab the recv lock as a writer 1663 * we do not stop the recv side because targets have been
1664 * good and have never sent us a successful tmf response
1665 * then sent more data for the cmd.
1578 */ 1666 */
1579 write_lock_bh(conn->recv_lock);
1580 spin_lock(&session->lock); 1667 spin_lock(&session->lock);
1581 fail_command(conn, ctask, DID_ABORT << 16); 1668 fail_command(conn, task, DID_ABORT << 16);
1582 conn->tmf_state = TMF_INITIAL; 1669 conn->tmf_state = TMF_INITIAL;
1583 spin_unlock(&session->lock); 1670 spin_unlock(&session->lock);
1584 write_unlock_bh(conn->recv_lock);
1585 iscsi_start_tx(conn); 1671 iscsi_start_tx(conn);
1586 goto success_unlocked; 1672 goto success_unlocked;
1587 case TMF_TIMEDOUT: 1673 case TMF_TIMEDOUT:
@@ -1591,7 +1677,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1591 case TMF_NOT_FOUND: 1677 case TMF_NOT_FOUND:
1592 if (!sc->SCp.ptr) { 1678 if (!sc->SCp.ptr) {
1593 conn->tmf_state = TMF_INITIAL; 1679 conn->tmf_state = TMF_INITIAL;
1594 /* ctask completed before tmf abort response */ 1680 /* task completed before tmf abort response */
1595 debug_scsi("sc completed while abort in progress\n"); 1681 debug_scsi("sc completed while abort in progress\n");
1596 goto success; 1682 goto success;
1597 } 1683 }
@@ -1604,7 +1690,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1604success: 1690success:
1605 spin_unlock_bh(&session->lock); 1691 spin_unlock_bh(&session->lock);
1606success_unlocked: 1692success_unlocked:
1607 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); 1693 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
1608 mutex_unlock(&session->eh_mutex); 1694 mutex_unlock(&session->eh_mutex);
1609 return SUCCESS; 1695 return SUCCESS;
1610 1696
@@ -1612,7 +1698,7 @@ failed:
1612 spin_unlock_bh(&session->lock); 1698 spin_unlock_bh(&session->lock);
1613failed_unlocked: 1699failed_unlocked:
1614 debug_scsi("abort failed [sc %p itt 0x%x]\n", sc, 1700 debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
1615 ctask ? ctask->itt : 0); 1701 task ? task->itt : 0);
1616 mutex_unlock(&session->eh_mutex); 1702 mutex_unlock(&session->eh_mutex);
1617 return FAILED; 1703 return FAILED;
1618} 1704}
@@ -1630,12 +1716,15 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
1630 1716
1631int iscsi_eh_device_reset(struct scsi_cmnd *sc) 1717int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1632{ 1718{
1633 struct Scsi_Host *host = sc->device->host; 1719 struct iscsi_cls_session *cls_session;
1634 struct iscsi_session *session = iscsi_hostdata(host->hostdata); 1720 struct iscsi_session *session;
1635 struct iscsi_conn *conn; 1721 struct iscsi_conn *conn;
1636 struct iscsi_tm *hdr; 1722 struct iscsi_tm *hdr;
1637 int rc = FAILED; 1723 int rc = FAILED;
1638 1724
1725 cls_session = starget_to_session(scsi_target(sc->device));
1726 session = cls_session->dd_data;
1727
1639 debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun); 1728 debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
1640 1729
1641 mutex_lock(&session->eh_mutex); 1730 mutex_lock(&session->eh_mutex);
@@ -1678,13 +1767,11 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1678 spin_unlock_bh(&session->lock); 1767 spin_unlock_bh(&session->lock);
1679 1768
1680 iscsi_suspend_tx(conn); 1769 iscsi_suspend_tx(conn);
1681 /* need to grab the recv lock then session lock */ 1770
1682 write_lock_bh(conn->recv_lock);
1683 spin_lock(&session->lock); 1771 spin_lock(&session->lock);
1684 fail_all_commands(conn, sc->device->lun, DID_ERROR); 1772 fail_all_commands(conn, sc->device->lun, DID_ERROR);
1685 conn->tmf_state = TMF_INITIAL; 1773 conn->tmf_state = TMF_INITIAL;
1686 spin_unlock(&session->lock); 1774 spin_unlock(&session->lock);
1687 write_unlock_bh(conn->recv_lock);
1688 1775
1689 iscsi_start_tx(conn); 1776 iscsi_start_tx(conn);
1690 goto done; 1777 goto done;
@@ -1760,177 +1847,203 @@ void iscsi_pool_free(struct iscsi_pool *q)
1760} 1847}
1761EXPORT_SYMBOL_GPL(iscsi_pool_free); 1848EXPORT_SYMBOL_GPL(iscsi_pool_free);
1762 1849
1763/* 1850/**
1764 * iSCSI Session's hostdata organization: 1851 * iscsi_host_add - add host to system
1852 * @shost: scsi host
1853 * @pdev: parent device
1854 *
1855 * This should be called by partial offload and software iscsi drivers
1856 * to add a host to the system.
1857 */
1858int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
1859{
1860 if (!shost->can_queue)
1861 shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
1862
1863 return scsi_add_host(shost, pdev);
1864}
1865EXPORT_SYMBOL_GPL(iscsi_host_add);
1866
1867/**
1868 * iscsi_host_alloc - allocate a host and driver data
1869 * @sht: scsi host template
1870 * @dd_data_size: driver host data size
1871 * @qdepth: default device queue depth
1872 *
1873 * This should be called by partial offload and software iscsi drivers.
1874 * To access the driver specific memory use the iscsi_host_priv() macro.
1875 */
1876struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
1877 int dd_data_size, uint16_t qdepth)
1878{
1879 struct Scsi_Host *shost;
1880
1881 shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
1882 if (!shost)
1883 return NULL;
1884 shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
1885
1886 if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
1887 if (qdepth != 0)
1888 printk(KERN_ERR "iscsi: invalid queue depth of %d. "
1889 "Queue depth must be between 1 and %d.\n",
1890 qdepth, ISCSI_MAX_CMD_PER_LUN);
1891 qdepth = ISCSI_DEF_CMD_PER_LUN;
1892 }
1893 shost->cmd_per_lun = qdepth;
1894 return shost;
1895}
1896EXPORT_SYMBOL_GPL(iscsi_host_alloc);
1897
1898/**
1899 * iscsi_host_remove - remove host and sessions
1900 * @shost: scsi host
1765 * 1901 *
1766 * *------------------* <== hostdata_session(host->hostdata) 1902 * This will also remove any sessions attached to the host, but if userspace
1767 * | ptr to class sess| 1903 * is managing the session at the same time this will break. TODO: add
1768 * |------------------| <== iscsi_hostdata(host->hostdata) 1904 * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
1769 * | iscsi_session | 1905 * does not remove the memory from under us.
1770 * *------------------*
1771 */ 1906 */
1907void iscsi_host_remove(struct Scsi_Host *shost)
1908{
1909 iscsi_host_for_each_session(shost, iscsi_session_teardown);
1910 scsi_remove_host(shost);
1911}
1912EXPORT_SYMBOL_GPL(iscsi_host_remove);
1772 1913
1773#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \ 1914void iscsi_host_free(struct Scsi_Host *shost)
1774 _sz % sizeof(unsigned long)) 1915{
1916 struct iscsi_host *ihost = shost_priv(shost);
1775 1917
1776#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata)) 1918 kfree(ihost->netdev);
1919 kfree(ihost->hwaddress);
1920 kfree(ihost->initiatorname);
1921 scsi_host_put(shost);
1922}
1923EXPORT_SYMBOL_GPL(iscsi_host_free);
1777 1924
1778/** 1925/**
1779 * iscsi_session_setup - create iscsi cls session and host and session 1926 * iscsi_session_setup - create iscsi cls session and host and session
1780 * @scsit: scsi transport template
1781 * @iscsit: iscsi transport template 1927 * @iscsit: iscsi transport template
1782 * @cmds_max: scsi host can queue 1928 * @shost: scsi host
1783 * @qdepth: scsi host cmds per lun 1929 * @cmds_max: session can queue
1784 * @cmd_task_size: LLD ctask private data size 1930 * @cmd_task_size: LLD task private data size
1785 * @mgmt_task_size: LLD mtask private data size
1786 * @initial_cmdsn: initial CmdSN 1931 * @initial_cmdsn: initial CmdSN
1787 * @hostno: host no allocated
1788 * 1932 *
1789 * This can be used by software iscsi_transports that allocate 1933 * This can be used by software iscsi_transports that allocate
1790 * a session per scsi host. 1934 * a session per scsi host.
1791 **/ 1935 *
1936 * Callers should set cmds_max to the largest total numer (mgmt + scsi) of
1937 * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
1938 * for nop handling and login/logout requests.
1939 */
1792struct iscsi_cls_session * 1940struct iscsi_cls_session *
1793iscsi_session_setup(struct iscsi_transport *iscsit, 1941iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
1794 struct scsi_transport_template *scsit, 1942 uint16_t cmds_max, int cmd_task_size,
1795 uint16_t cmds_max, uint16_t qdepth, 1943 uint32_t initial_cmdsn, unsigned int id)
1796 int cmd_task_size, int mgmt_task_size,
1797 uint32_t initial_cmdsn, uint32_t *hostno)
1798{ 1944{
1799 struct Scsi_Host *shost;
1800 struct iscsi_session *session; 1945 struct iscsi_session *session;
1801 struct iscsi_cls_session *cls_session; 1946 struct iscsi_cls_session *cls_session;
1802 int cmd_i; 1947 int cmd_i, scsi_cmds, total_cmds = cmds_max;
1803 1948
1804 if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) { 1949 if (!total_cmds)
1805 if (qdepth != 0) 1950 total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
1806 printk(KERN_ERR "iscsi: invalid queue depth of %d. " 1951 /*
1807 "Queue depth must be between 1 and %d.\n", 1952 * The iscsi layer needs some tasks for nop handling and tmfs,
1808 qdepth, ISCSI_MAX_CMD_PER_LUN); 1953 * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
1809 qdepth = ISCSI_DEF_CMD_PER_LUN; 1954 * + 1 command for scsi IO.
1955 */
1956 if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
1957 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
1958 "must be a power of two that is at least %d.\n",
1959 total_cmds, ISCSI_TOTAL_CMDS_MIN);
1960 return NULL;
1810 } 1961 }
1811 1962
1812 if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET || 1963 if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
1813 cmds_max < 2) { 1964 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
1814 if (cmds_max != 0) 1965 "must be a power of 2 less than or equal to %d.\n",
1815 printk(KERN_ERR "iscsi: invalid can_queue of %d. " 1966 cmds_max, ISCSI_TOTAL_CMDS_MAX);
1816 "can_queue must be a power of 2 and between " 1967 total_cmds = ISCSI_TOTAL_CMDS_MAX;
1817 "2 and %d - setting to %d.\n", cmds_max,
1818 ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
1819 cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
1820 } 1968 }
1821 1969
1822 shost = scsi_host_alloc(iscsit->host_template, 1970 if (!is_power_of_2(total_cmds)) {
1823 hostdata_privsize(sizeof(*session))); 1971 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
1824 if (!shost) 1972 "must be a power of 2.\n", total_cmds);
1825 return NULL; 1973 total_cmds = rounddown_pow_of_two(total_cmds);
1826 1974 if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
1827 /* the iscsi layer takes one task for reserve */ 1975 return NULL;
1828 shost->can_queue = cmds_max - 1; 1976 printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
1829 shost->cmd_per_lun = qdepth; 1977 total_cmds);
1830 shost->max_id = 1; 1978 }
1831 shost->max_channel = 0; 1979 scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
1832 shost->max_lun = iscsit->max_lun;
1833 shost->max_cmd_len = iscsit->max_cmd_len;
1834 shost->transportt = scsit;
1835 shost->transportt->create_work_queue = 1;
1836 shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
1837 *hostno = shost->host_no;
1838 1980
1839 session = iscsi_hostdata(shost->hostdata); 1981 cls_session = iscsi_alloc_session(shost, iscsit,
1840 memset(session, 0, sizeof(struct iscsi_session)); 1982 sizeof(struct iscsi_session));
1983 if (!cls_session)
1984 return NULL;
1985 session = cls_session->dd_data;
1986 session->cls_session = cls_session;
1841 session->host = shost; 1987 session->host = shost;
1842 session->state = ISCSI_STATE_FREE; 1988 session->state = ISCSI_STATE_FREE;
1843 session->fast_abort = 1; 1989 session->fast_abort = 1;
1844 session->lu_reset_timeout = 15; 1990 session->lu_reset_timeout = 15;
1845 session->abort_timeout = 10; 1991 session->abort_timeout = 10;
1846 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX; 1992 session->scsi_cmds_max = scsi_cmds;
1847 session->cmds_max = cmds_max; 1993 session->cmds_max = total_cmds;
1848 session->queued_cmdsn = session->cmdsn = initial_cmdsn; 1994 session->queued_cmdsn = session->cmdsn = initial_cmdsn;
1849 session->exp_cmdsn = initial_cmdsn + 1; 1995 session->exp_cmdsn = initial_cmdsn + 1;
1850 session->max_cmdsn = initial_cmdsn + 1; 1996 session->max_cmdsn = initial_cmdsn + 1;
1851 session->max_r2t = 1; 1997 session->max_r2t = 1;
1852 session->tt = iscsit; 1998 session->tt = iscsit;
1853 mutex_init(&session->eh_mutex); 1999 mutex_init(&session->eh_mutex);
2000 spin_lock_init(&session->lock);
1854 2001
1855 /* initialize SCSI PDU commands pool */ 2002 /* initialize SCSI PDU commands pool */
1856 if (iscsi_pool_init(&session->cmdpool, session->cmds_max, 2003 if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
1857 (void***)&session->cmds, 2004 (void***)&session->cmds,
1858 cmd_task_size + sizeof(struct iscsi_cmd_task))) 2005 cmd_task_size + sizeof(struct iscsi_task)))
1859 goto cmdpool_alloc_fail; 2006 goto cmdpool_alloc_fail;
1860 2007
1861 /* pre-format cmds pool with ITT */ 2008 /* pre-format cmds pool with ITT */
1862 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { 2009 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1863 struct iscsi_cmd_task *ctask = session->cmds[cmd_i]; 2010 struct iscsi_task *task = session->cmds[cmd_i];
1864 2011
1865 if (cmd_task_size) 2012 if (cmd_task_size)
1866 ctask->dd_data = &ctask[1]; 2013 task->dd_data = &task[1];
1867 ctask->itt = cmd_i; 2014 task->itt = cmd_i;
1868 INIT_LIST_HEAD(&ctask->running); 2015 INIT_LIST_HEAD(&task->running);
1869 }
1870
1871 spin_lock_init(&session->lock);
1872
1873 /* initialize immediate command pool */
1874 if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
1875 (void***)&session->mgmt_cmds,
1876 mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
1877 goto mgmtpool_alloc_fail;
1878
1879
1880 /* pre-format immediate cmds pool with ITT */
1881 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
1882 struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
1883
1884 if (mgmt_task_size)
1885 mtask->dd_data = &mtask[1];
1886 mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
1887 INIT_LIST_HEAD(&mtask->running);
1888 } 2016 }
1889 2017
1890 if (scsi_add_host(shost, NULL))
1891 goto add_host_fail;
1892
1893 if (!try_module_get(iscsit->owner)) 2018 if (!try_module_get(iscsit->owner))
1894 goto cls_session_fail; 2019 goto module_get_fail;
1895
1896 cls_session = iscsi_create_session(shost, iscsit, 0);
1897 if (!cls_session)
1898 goto module_put;
1899 *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
1900 2020
2021 if (iscsi_add_session(cls_session, id))
2022 goto cls_session_fail;
1901 return cls_session; 2023 return cls_session;
1902 2024
1903module_put:
1904 module_put(iscsit->owner);
1905cls_session_fail: 2025cls_session_fail:
1906 scsi_remove_host(shost); 2026 module_put(iscsit->owner);
1907add_host_fail: 2027module_get_fail:
1908 iscsi_pool_free(&session->mgmtpool);
1909mgmtpool_alloc_fail:
1910 iscsi_pool_free(&session->cmdpool); 2028 iscsi_pool_free(&session->cmdpool);
1911cmdpool_alloc_fail: 2029cmdpool_alloc_fail:
1912 scsi_host_put(shost); 2030 iscsi_free_session(cls_session);
1913 return NULL; 2031 return NULL;
1914} 2032}
1915EXPORT_SYMBOL_GPL(iscsi_session_setup); 2033EXPORT_SYMBOL_GPL(iscsi_session_setup);
1916 2034
1917/** 2035/**
1918 * iscsi_session_teardown - destroy session, host, and cls_session 2036 * iscsi_session_teardown - destroy session, host, and cls_session
1919 * shost: scsi host 2037 * @cls_session: iscsi session
1920 * 2038 *
1921 * This can be used by software iscsi_transports that allocate 2039 * The driver must have called iscsi_remove_session before
1922 * a session per scsi host. 2040 * calling this.
1923 **/ 2041 */
1924void iscsi_session_teardown(struct iscsi_cls_session *cls_session) 2042void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
1925{ 2043{
1926 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 2044 struct iscsi_session *session = cls_session->dd_data;
1927 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
1928 struct module *owner = cls_session->transport->owner; 2045 struct module *owner = cls_session->transport->owner;
1929 2046
1930 iscsi_remove_session(cls_session);
1931 scsi_remove_host(shost);
1932
1933 iscsi_pool_free(&session->mgmtpool);
1934 iscsi_pool_free(&session->cmdpool); 2047 iscsi_pool_free(&session->cmdpool);
1935 2048
1936 kfree(session->password); 2049 kfree(session->password);
@@ -1938,12 +2051,10 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
1938 kfree(session->username); 2051 kfree(session->username);
1939 kfree(session->username_in); 2052 kfree(session->username_in);
1940 kfree(session->targetname); 2053 kfree(session->targetname);
1941 kfree(session->netdev);
1942 kfree(session->hwaddress);
1943 kfree(session->initiatorname); 2054 kfree(session->initiatorname);
2055 kfree(session->ifacename);
1944 2056
1945 iscsi_free_session(cls_session); 2057 iscsi_destroy_session(cls_session);
1946 scsi_host_put(shost);
1947 module_put(owner); 2058 module_put(owner);
1948} 2059}
1949EXPORT_SYMBOL_GPL(iscsi_session_teardown); 2060EXPORT_SYMBOL_GPL(iscsi_session_teardown);
@@ -1951,22 +2062,26 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
1951/** 2062/**
1952 * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn 2063 * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
1953 * @cls_session: iscsi_cls_session 2064 * @cls_session: iscsi_cls_session
2065 * @dd_size: private driver data size
1954 * @conn_idx: cid 2066 * @conn_idx: cid
1955 **/ 2067 */
1956struct iscsi_cls_conn * 2068struct iscsi_cls_conn *
1957iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) 2069iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2070 uint32_t conn_idx)
1958{ 2071{
1959 struct iscsi_session *session = class_to_transport_session(cls_session); 2072 struct iscsi_session *session = cls_session->dd_data;
1960 struct iscsi_conn *conn; 2073 struct iscsi_conn *conn;
1961 struct iscsi_cls_conn *cls_conn; 2074 struct iscsi_cls_conn *cls_conn;
1962 char *data; 2075 char *data;
1963 2076
1964 cls_conn = iscsi_create_conn(cls_session, conn_idx); 2077 cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
2078 conn_idx);
1965 if (!cls_conn) 2079 if (!cls_conn)
1966 return NULL; 2080 return NULL;
1967 conn = cls_conn->dd_data; 2081 conn = cls_conn->dd_data;
1968 memset(conn, 0, sizeof(*conn)); 2082 memset(conn, 0, sizeof(*conn) + dd_size);
1969 2083
2084 conn->dd_data = cls_conn->dd_data + sizeof(*conn);
1970 conn->session = session; 2085 conn->session = session;
1971 conn->cls_conn = cls_conn; 2086 conn->cls_conn = cls_conn;
1972 conn->c_stage = ISCSI_CONN_INITIAL_STAGE; 2087 conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
@@ -1985,30 +2100,30 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1985 INIT_LIST_HEAD(&conn->requeue); 2100 INIT_LIST_HEAD(&conn->requeue);
1986 INIT_WORK(&conn->xmitwork, iscsi_xmitworker); 2101 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
1987 2102
1988 /* allocate login_mtask used for the login/text sequences */ 2103 /* allocate login_task used for the login/text sequences */
1989 spin_lock_bh(&session->lock); 2104 spin_lock_bh(&session->lock);
1990 if (!__kfifo_get(session->mgmtpool.queue, 2105 if (!__kfifo_get(session->cmdpool.queue,
1991 (void*)&conn->login_mtask, 2106 (void*)&conn->login_task,
1992 sizeof(void*))) { 2107 sizeof(void*))) {
1993 spin_unlock_bh(&session->lock); 2108 spin_unlock_bh(&session->lock);
1994 goto login_mtask_alloc_fail; 2109 goto login_task_alloc_fail;
1995 } 2110 }
1996 spin_unlock_bh(&session->lock); 2111 spin_unlock_bh(&session->lock);
1997 2112
1998 data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL); 2113 data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
1999 if (!data) 2114 if (!data)
2000 goto login_mtask_data_alloc_fail; 2115 goto login_task_data_alloc_fail;
2001 conn->login_mtask->data = conn->data = data; 2116 conn->login_task->data = conn->data = data;
2002 2117
2003 init_timer(&conn->tmf_timer); 2118 init_timer(&conn->tmf_timer);
2004 init_waitqueue_head(&conn->ehwait); 2119 init_waitqueue_head(&conn->ehwait);
2005 2120
2006 return cls_conn; 2121 return cls_conn;
2007 2122
2008login_mtask_data_alloc_fail: 2123login_task_data_alloc_fail:
2009 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, 2124 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
2010 sizeof(void*)); 2125 sizeof(void*));
2011login_mtask_alloc_fail: 2126login_task_alloc_fail:
2012 iscsi_destroy_conn(cls_conn); 2127 iscsi_destroy_conn(cls_conn);
2013 return NULL; 2128 return NULL;
2014} 2129}
@@ -2068,7 +2183,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2068 spin_lock_bh(&session->lock); 2183 spin_lock_bh(&session->lock);
2069 kfree(conn->data); 2184 kfree(conn->data);
2070 kfree(conn->persistent_address); 2185 kfree(conn->persistent_address);
2071 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, 2186 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
2072 sizeof(void*)); 2187 sizeof(void*));
2073 if (session->leadconn == conn) 2188 if (session->leadconn == conn)
2074 session->leadconn = NULL; 2189 session->leadconn = NULL;
@@ -2140,7 +2255,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2140 } 2255 }
2141 spin_unlock_bh(&session->lock); 2256 spin_unlock_bh(&session->lock);
2142 2257
2143 iscsi_unblock_session(session_to_cls(session)); 2258 iscsi_unblock_session(session->cls_session);
2144 wake_up(&conn->ehwait); 2259 wake_up(&conn->ehwait);
2145 return 0; 2260 return 0;
2146} 2261}
@@ -2149,21 +2264,23 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
2149static void 2264static void
2150flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn) 2265flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
2151{ 2266{
2152 struct iscsi_mgmt_task *mtask, *tmp; 2267 struct iscsi_task *task, *tmp;
2153 2268
2154 /* handle pending */ 2269 /* handle pending */
2155 list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) { 2270 list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
2156 debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt); 2271 debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
2157 iscsi_free_mgmt_task(conn, mtask); 2272 /* release ref from prep task */
2273 __iscsi_put_task(task);
2158 } 2274 }
2159 2275
2160 /* handle running */ 2276 /* handle running */
2161 list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) { 2277 list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
2162 debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt); 2278 debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
2163 iscsi_free_mgmt_task(conn, mtask); 2279 /* release ref from prep task */
2280 __iscsi_put_task(task);
2164 } 2281 }
2165 2282
2166 conn->mtask = NULL; 2283 conn->task = NULL;
2167} 2284}
2168 2285
2169static void iscsi_start_session_recovery(struct iscsi_session *session, 2286static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2182,17 +2299,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2182 } 2299 }
2183 2300
2184 /* 2301 /*
2185 * The LLD either freed/unset the lock on us, or userspace called
2186 * stop but did not create a proper connection (connection was never
2187 * bound or it was unbound then stop was called).
2188 */
2189 if (!conn->recv_lock) {
2190 spin_unlock_bh(&session->lock);
2191 mutex_unlock(&session->eh_mutex);
2192 return;
2193 }
2194
2195 /*
2196 * When this is called for the in_login state, we only want to clean 2302 * When this is called for the in_login state, we only want to clean
2197 * up the login task and connection. We do not need to block and set 2303 * up the login task and connection. We do not need to block and set
2198 * the recovery state again 2304 * the recovery state again
@@ -2208,11 +2314,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2208 spin_unlock_bh(&session->lock); 2314 spin_unlock_bh(&session->lock);
2209 2315
2210 iscsi_suspend_tx(conn); 2316 iscsi_suspend_tx(conn);
2211
2212 write_lock_bh(conn->recv_lock);
2213 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
2214 write_unlock_bh(conn->recv_lock);
2215
2216 /* 2317 /*
2217 * for connection level recovery we should not calculate 2318 * for connection level recovery we should not calculate
2218 * header digest. conn->hdr_size used for optimization 2319 * header digest. conn->hdr_size used for optimization
@@ -2225,7 +2326,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2225 if (session->state == ISCSI_STATE_IN_RECOVERY && 2326 if (session->state == ISCSI_STATE_IN_RECOVERY &&
2226 old_stop_stage != STOP_CONN_RECOVER) { 2327 old_stop_stage != STOP_CONN_RECOVER) {
2227 debug_scsi("blocking session\n"); 2328 debug_scsi("blocking session\n");
2228 iscsi_block_session(session_to_cls(session)); 2329 iscsi_block_session(session->cls_session);
2229 } 2330 }
2230 } 2331 }
2231 2332
@@ -2260,7 +2361,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
2260int iscsi_conn_bind(struct iscsi_cls_session *cls_session, 2361int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2261 struct iscsi_cls_conn *cls_conn, int is_leading) 2362 struct iscsi_cls_conn *cls_conn, int is_leading)
2262{ 2363{
2263 struct iscsi_session *session = class_to_transport_session(cls_session); 2364 struct iscsi_session *session = cls_session->dd_data;
2264 struct iscsi_conn *conn = cls_conn->dd_data; 2365 struct iscsi_conn *conn = cls_conn->dd_data;
2265 2366
2266 spin_lock_bh(&session->lock); 2367 spin_lock_bh(&session->lock);
@@ -2399,6 +2500,14 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2399 if (!conn->persistent_address) 2500 if (!conn->persistent_address)
2400 return -ENOMEM; 2501 return -ENOMEM;
2401 break; 2502 break;
2503 case ISCSI_PARAM_IFACE_NAME:
2504 if (!session->ifacename)
2505 session->ifacename = kstrdup(buf, GFP_KERNEL);
2506 break;
2507 case ISCSI_PARAM_INITIATOR_NAME:
2508 if (!session->initiatorname)
2509 session->initiatorname = kstrdup(buf, GFP_KERNEL);
2510 break;
2402 default: 2511 default:
2403 return -ENOSYS; 2512 return -ENOSYS;
2404 } 2513 }
@@ -2410,8 +2519,7 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
2410int iscsi_session_get_param(struct iscsi_cls_session *cls_session, 2519int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
2411 enum iscsi_param param, char *buf) 2520 enum iscsi_param param, char *buf)
2412{ 2521{
2413 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 2522 struct iscsi_session *session = cls_session->dd_data;
2414 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2415 int len; 2523 int len;
2416 2524
2417 switch(param) { 2525 switch(param) {
@@ -2466,6 +2574,15 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
2466 case ISCSI_PARAM_PASSWORD_IN: 2574 case ISCSI_PARAM_PASSWORD_IN:
2467 len = sprintf(buf, "%s\n", session->password_in); 2575 len = sprintf(buf, "%s\n", session->password_in);
2468 break; 2576 break;
2577 case ISCSI_PARAM_IFACE_NAME:
2578 len = sprintf(buf, "%s\n", session->ifacename);
2579 break;
2580 case ISCSI_PARAM_INITIATOR_NAME:
2581 if (!session->initiatorname)
2582 len = sprintf(buf, "%s\n", "unknown");
2583 else
2584 len = sprintf(buf, "%s\n", session->initiatorname);
2585 break;
2469 default: 2586 default:
2470 return -ENOSYS; 2587 return -ENOSYS;
2471 } 2588 }
@@ -2525,29 +2642,35 @@ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
2525int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, 2642int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2526 char *buf) 2643 char *buf)
2527{ 2644{
2528 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 2645 struct iscsi_host *ihost = shost_priv(shost);
2529 int len; 2646 int len;
2530 2647
2531 switch (param) { 2648 switch (param) {
2532 case ISCSI_HOST_PARAM_NETDEV_NAME: 2649 case ISCSI_HOST_PARAM_NETDEV_NAME:
2533 if (!session->netdev) 2650 if (!ihost->netdev)
2534 len = sprintf(buf, "%s\n", "default"); 2651 len = sprintf(buf, "%s\n", "default");
2535 else 2652 else
2536 len = sprintf(buf, "%s\n", session->netdev); 2653 len = sprintf(buf, "%s\n", ihost->netdev);
2537 break; 2654 break;
2538 case ISCSI_HOST_PARAM_HWADDRESS: 2655 case ISCSI_HOST_PARAM_HWADDRESS:
2539 if (!session->hwaddress) 2656 if (!ihost->hwaddress)
2540 len = sprintf(buf, "%s\n", "default"); 2657 len = sprintf(buf, "%s\n", "default");
2541 else 2658 else
2542 len = sprintf(buf, "%s\n", session->hwaddress); 2659 len = sprintf(buf, "%s\n", ihost->hwaddress);
2543 break; 2660 break;
2544 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2661 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2545 if (!session->initiatorname) 2662 if (!ihost->initiatorname)
2546 len = sprintf(buf, "%s\n", "unknown"); 2663 len = sprintf(buf, "%s\n", "unknown");
2547 else 2664 else
2548 len = sprintf(buf, "%s\n", session->initiatorname); 2665 len = sprintf(buf, "%s\n", ihost->initiatorname);
2666 break;
2667 case ISCSI_HOST_PARAM_IPADDRESS:
2668 if (!strlen(ihost->local_address))
2669 len = sprintf(buf, "%s\n", "unknown");
2670 else
2671 len = sprintf(buf, "%s\n",
2672 ihost->local_address);
2549 break; 2673 break;
2550
2551 default: 2674 default:
2552 return -ENOSYS; 2675 return -ENOSYS;
2553 } 2676 }
@@ -2559,20 +2682,20 @@ EXPORT_SYMBOL_GPL(iscsi_host_get_param);
2559int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param, 2682int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2560 char *buf, int buflen) 2683 char *buf, int buflen)
2561{ 2684{
2562 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 2685 struct iscsi_host *ihost = shost_priv(shost);
2563 2686
2564 switch (param) { 2687 switch (param) {
2565 case ISCSI_HOST_PARAM_NETDEV_NAME: 2688 case ISCSI_HOST_PARAM_NETDEV_NAME:
2566 if (!session->netdev) 2689 if (!ihost->netdev)
2567 session->netdev = kstrdup(buf, GFP_KERNEL); 2690 ihost->netdev = kstrdup(buf, GFP_KERNEL);
2568 break; 2691 break;
2569 case ISCSI_HOST_PARAM_HWADDRESS: 2692 case ISCSI_HOST_PARAM_HWADDRESS:
2570 if (!session->hwaddress) 2693 if (!ihost->hwaddress)
2571 session->hwaddress = kstrdup(buf, GFP_KERNEL); 2694 ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
2572 break; 2695 break;
2573 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2696 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2574 if (!session->initiatorname) 2697 if (!ihost->initiatorname)
2575 session->initiatorname = kstrdup(buf, GFP_KERNEL); 2698 ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
2576 break; 2699 break;
2577 default: 2700 default:
2578 return -ENOSYS; 2701 return -ENOSYS;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index ec0b0f6e5e1a..e0e018d12653 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -33,6 +33,7 @@ struct lpfc_sli2_slim;
33#define LPFC_MAX_SG_SEG_CNT 256 /* sg element count per scsi cmnd */ 33#define LPFC_MAX_SG_SEG_CNT 256 /* sg element count per scsi cmnd */
34#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ 34#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
35#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ 35#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
36#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
36 37
37/* 38/*
38 * Following time intervals are used of adjusting SCSI device 39 * Following time intervals are used of adjusting SCSI device
@@ -59,6 +60,9 @@ struct lpfc_sli2_slim;
59 60
60#define MAX_HBAEVT 32 61#define MAX_HBAEVT 32
61 62
63/* lpfc wait event data ready flag */
64#define LPFC_DATA_READY (1<<0)
65
62enum lpfc_polling_flags { 66enum lpfc_polling_flags {
63 ENABLE_FCP_RING_POLLING = 0x1, 67 ENABLE_FCP_RING_POLLING = 0x1,
64 DISABLE_FCP_RING_INT = 0x2 68 DISABLE_FCP_RING_INT = 0x2
@@ -425,9 +429,6 @@ struct lpfc_hba {
425 429
426 uint16_t pci_cfg_value; 430 uint16_t pci_cfg_value;
427 431
428 uint8_t work_found;
429#define LPFC_MAX_WORKER_ITERATION 4
430
431 uint8_t fc_linkspeed; /* Link speed after last READ_LA */ 432 uint8_t fc_linkspeed; /* Link speed after last READ_LA */
432 433
433 uint32_t fc_eventTag; /* event tag for link attention */ 434 uint32_t fc_eventTag; /* event tag for link attention */
@@ -489,8 +490,9 @@ struct lpfc_hba {
489 uint32_t work_hs; /* HS stored in case of ERRAT */ 490 uint32_t work_hs; /* HS stored in case of ERRAT */
490 uint32_t work_status[2]; /* Extra status from SLIM */ 491 uint32_t work_status[2]; /* Extra status from SLIM */
491 492
492 wait_queue_head_t *work_wait; 493 wait_queue_head_t work_waitq;
493 struct task_struct *worker_thread; 494 struct task_struct *worker_thread;
495 long data_flags;
494 496
495 uint32_t hbq_in_use; /* HBQs in use flag */ 497 uint32_t hbq_in_use; /* HBQs in use flag */
496 struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ 498 struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */
@@ -637,6 +639,17 @@ lpfc_is_link_up(struct lpfc_hba *phba)
637 phba->link_state == LPFC_HBA_READY; 639 phba->link_state == LPFC_HBA_READY;
638} 640}
639 641
642static inline void
643lpfc_worker_wake_up(struct lpfc_hba *phba)
644{
645 /* Set the lpfc data pending flag */
646 set_bit(LPFC_DATA_READY, &phba->data_flags);
647
648 /* Wake up worker thread */
649 wake_up(&phba->work_waitq);
650 return;
651}
652
640#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ 653#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
641#define FC_REG_TEMPERATURE_EVENT 0x20 /* Register for temperature 654#define FC_REG_TEMPERATURE_EVENT 0x20 /* Register for temperature
642 event */ 655 event */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 960baaf11fb1..37bfa0bd1dae 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1995,8 +1995,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1995 /* Don't allow mailbox commands to be sent when blocked 1995 /* Don't allow mailbox commands to be sent when blocked
1996 * or when in the middle of discovery 1996 * or when in the middle of discovery
1997 */ 1997 */
1998 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO || 1998 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
1999 vport->fc_flag & FC_NDISC_ACTIVE) {
2000 sysfs_mbox_idle(phba); 1999 sysfs_mbox_idle(phba);
2001 spin_unlock_irq(&phba->hbalock); 2000 spin_unlock_irq(&phba->hbalock);
2002 return -EAGAIN; 2001 return -EAGAIN;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 7c9f8317d972..1b8245213b83 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -142,7 +142,7 @@ int lpfc_config_port_post(struct lpfc_hba *);
142int lpfc_hba_down_prep(struct lpfc_hba *); 142int lpfc_hba_down_prep(struct lpfc_hba *);
143int lpfc_hba_down_post(struct lpfc_hba *); 143int lpfc_hba_down_post(struct lpfc_hba *);
144void lpfc_hba_init(struct lpfc_hba *, uint32_t *); 144void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
145int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int); 145int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
146void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int); 146void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
147int lpfc_online(struct lpfc_hba *); 147int lpfc_online(struct lpfc_hba *);
148void lpfc_unblock_mgmt_io(struct lpfc_hba *); 148void lpfc_unblock_mgmt_io(struct lpfc_hba *);
@@ -263,6 +263,7 @@ extern int lpfc_sli_mode;
263extern int lpfc_enable_npiv; 263extern int lpfc_enable_npiv;
264 264
265int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t); 265int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
266int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t);
266void lpfc_terminate_rport_io(struct fc_rport *); 267void lpfc_terminate_rport_io(struct fc_rport *);
267void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport); 268void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
268 269
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 153afae567b5..7fc74cf5823b 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -101,7 +101,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
101 /* Not enough posted buffers; Try posting more buffers */ 101 /* Not enough posted buffers; Try posting more buffers */
102 phba->fc_stat.NoRcvBuf++; 102 phba->fc_stat.NoRcvBuf++;
103 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 103 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
104 lpfc_post_buffer(phba, pring, 2, 1); 104 lpfc_post_buffer(phba, pring, 2);
105 return; 105 return;
106 } 106 }
107 107
@@ -151,7 +151,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
151 } 151 }
152 list_del(&iocbq->list); 152 list_del(&iocbq->list);
153 lpfc_sli_release_iocbq(phba, iocbq); 153 lpfc_sli_release_iocbq(phba, iocbq);
154 lpfc_post_buffer(phba, pring, i, 1); 154 lpfc_post_buffer(phba, pring, i);
155 } 155 }
156 } 156 }
157} 157}
@@ -990,7 +990,7 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
990 return; 990 return;
991} 991}
992 992
993static int 993int
994lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol, 994lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
995 size_t size) 995 size_t size)
996{ 996{
@@ -1679,20 +1679,18 @@ lpfc_fdmi_tmo(unsigned long ptr)
1679{ 1679{
1680 struct lpfc_vport *vport = (struct lpfc_vport *)ptr; 1680 struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
1681 struct lpfc_hba *phba = vport->phba; 1681 struct lpfc_hba *phba = vport->phba;
1682 uint32_t tmo_posted;
1682 unsigned long iflag; 1683 unsigned long iflag;
1683 1684
1684 spin_lock_irqsave(&vport->work_port_lock, iflag); 1685 spin_lock_irqsave(&vport->work_port_lock, iflag);
1685 if (!(vport->work_port_events & WORKER_FDMI_TMO)) { 1686 tmo_posted = vport->work_port_events & WORKER_FDMI_TMO;
1687 if (!tmo_posted)
1686 vport->work_port_events |= WORKER_FDMI_TMO; 1688 vport->work_port_events |= WORKER_FDMI_TMO;
1687 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 1689 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
1688 1690
1689 spin_lock_irqsave(&phba->hbalock, iflag); 1691 if (!tmo_posted)
1690 if (phba->work_wait) 1692 lpfc_worker_wake_up(phba);
1691 lpfc_worker_wake_up(phba); 1693 return;
1692 spin_unlock_irqrestore(&phba->hbalock, iflag);
1693 }
1694 else
1695 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
1696} 1694}
1697 1695
1698void 1696void
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 886c5f1b11d2..f54e0f7eaee3 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1754,29 +1754,34 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
1754 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1754 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1755 struct lpfc_work_evt *evtp; 1755 struct lpfc_work_evt *evtp;
1756 1756
1757 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
1758 return;
1757 spin_lock_irq(shost->host_lock); 1759 spin_lock_irq(shost->host_lock);
1758 nlp->nlp_flag &= ~NLP_DELAY_TMO; 1760 nlp->nlp_flag &= ~NLP_DELAY_TMO;
1759 spin_unlock_irq(shost->host_lock); 1761 spin_unlock_irq(shost->host_lock);
1760 del_timer_sync(&nlp->nlp_delayfunc); 1762 del_timer_sync(&nlp->nlp_delayfunc);
1761 nlp->nlp_last_elscmd = 0; 1763 nlp->nlp_last_elscmd = 0;
1762
1763 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 1764 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
1764 list_del_init(&nlp->els_retry_evt.evt_listp); 1765 list_del_init(&nlp->els_retry_evt.evt_listp);
1765 /* Decrement nlp reference count held for the delayed retry */ 1766 /* Decrement nlp reference count held for the delayed retry */
1766 evtp = &nlp->els_retry_evt; 1767 evtp = &nlp->els_retry_evt;
1767 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 1768 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
1768 } 1769 }
1769
1770 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 1770 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
1771 spin_lock_irq(shost->host_lock); 1771 spin_lock_irq(shost->host_lock);
1772 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1772 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1773 spin_unlock_irq(shost->host_lock); 1773 spin_unlock_irq(shost->host_lock);
1774 if (vport->num_disc_nodes) { 1774 if (vport->num_disc_nodes) {
1775 /* Check to see if there are more 1775 if (vport->port_state < LPFC_VPORT_READY) {
1776 * PLOGIs to be sent 1776 /* Check if there are more ADISCs to be sent */
1777 */ 1777 lpfc_more_adisc(vport);
1778 lpfc_more_plogi(vport); 1778 if ((vport->num_disc_nodes == 0) &&
1779 1779 (vport->fc_npr_cnt))
1780 lpfc_els_disc_plogi(vport);
1781 } else {
1782 /* Check if there are more PLOGIs to be sent */
1783 lpfc_more_plogi(vport);
1784 }
1780 if (vport->num_disc_nodes == 0) { 1785 if (vport->num_disc_nodes == 0) {
1781 spin_lock_irq(shost->host_lock); 1786 spin_lock_irq(shost->host_lock);
1782 vport->fc_flag &= ~FC_NDISC_ACTIVE; 1787 vport->fc_flag &= ~FC_NDISC_ACTIVE;
@@ -1798,10 +1803,6 @@ lpfc_els_retry_delay(unsigned long ptr)
1798 unsigned long flags; 1803 unsigned long flags;
1799 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 1804 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
1800 1805
1801 ndlp = (struct lpfc_nodelist *) ptr;
1802 phba = ndlp->vport->phba;
1803 evtp = &ndlp->els_retry_evt;
1804
1805 spin_lock_irqsave(&phba->hbalock, flags); 1806 spin_lock_irqsave(&phba->hbalock, flags);
1806 if (!list_empty(&evtp->evt_listp)) { 1807 if (!list_empty(&evtp->evt_listp)) {
1807 spin_unlock_irqrestore(&phba->hbalock, flags); 1808 spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -1812,11 +1813,11 @@ lpfc_els_retry_delay(unsigned long ptr)
1812 * count until the queued work is done 1813 * count until the queued work is done
1813 */ 1814 */
1814 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 1815 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
1815 evtp->evt = LPFC_EVT_ELS_RETRY; 1816 if (evtp->evt_arg1) {
1816 list_add_tail(&evtp->evt_listp, &phba->work_list); 1817 evtp->evt = LPFC_EVT_ELS_RETRY;
1817 if (phba->work_wait) 1818 list_add_tail(&evtp->evt_listp, &phba->work_list);
1818 lpfc_worker_wake_up(phba); 1819 lpfc_worker_wake_up(phba);
1819 1820 }
1820 spin_unlock_irqrestore(&phba->hbalock, flags); 1821 spin_unlock_irqrestore(&phba->hbalock, flags);
1821 return; 1822 return;
1822} 1823}
@@ -2761,10 +2762,11 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2761 npr = (PRLI *) pcmd; 2762 npr = (PRLI *) pcmd;
2762 vpd = &phba->vpd; 2763 vpd = &phba->vpd;
2763 /* 2764 /*
2764 * If our firmware version is 3.20 or later, 2765 * If the remote port is a target and our firmware version is 3.20 or
2765 * set the following bits for FC-TAPE support. 2766 * later, set the following bits for FC-TAPE support.
2766 */ 2767 */
2767 if (vpd->rev.feaLevelHigh >= 0x02) { 2768 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
2769 (vpd->rev.feaLevelHigh >= 0x02)) {
2768 npr->ConfmComplAllowed = 1; 2770 npr->ConfmComplAllowed = 1;
2769 npr->Retry = 1; 2771 npr->Retry = 1;
2770 npr->TaskRetryIdReq = 1; 2772 npr->TaskRetryIdReq = 1;
@@ -3056,27 +3058,16 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
3056{ 3058{
3057 struct lpfc_nodelist *ndlp = NULL; 3059 struct lpfc_nodelist *ndlp = NULL;
3058 3060
3059 /* Look at all nodes effected by pending RSCNs and move 3061 /* Move all affected nodes by pending RSCNs to NPR state. */
3060 * them to NPR state.
3061 */
3062
3063 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 3062 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3064 if (!NLP_CHK_NODE_ACT(ndlp) || 3063 if (!NLP_CHK_NODE_ACT(ndlp) ||
3065 ndlp->nlp_state == NLP_STE_UNUSED_NODE || 3064 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
3066 lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0) 3065 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
3067 continue; 3066 continue;
3068
3069 lpfc_disc_state_machine(vport, ndlp, NULL, 3067 lpfc_disc_state_machine(vport, ndlp, NULL,
3070 NLP_EVT_DEVICE_RECOVERY); 3068 NLP_EVT_DEVICE_RECOVERY);
3071 3069 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3072 /*
3073 * Make sure NLP_DELAY_TMO is NOT running after a device
3074 * recovery event.
3075 */
3076 if (ndlp->nlp_flag & NLP_DELAY_TMO)
3077 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3078 } 3070 }
3079
3080 return 0; 3071 return 0;
3081} 3072}
3082 3073
@@ -3781,91 +3772,27 @@ static int
3781lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3772lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3782 struct lpfc_nodelist *fan_ndlp) 3773 struct lpfc_nodelist *fan_ndlp)
3783{ 3774{
3784 struct lpfc_dmabuf *pcmd; 3775 struct lpfc_hba *phba = vport->phba;
3785 uint32_t *lp; 3776 uint32_t *lp;
3786 IOCB_t *icmd;
3787 uint32_t cmd, did;
3788 FAN *fp; 3777 FAN *fp;
3789 struct lpfc_nodelist *ndlp, *next_ndlp;
3790 struct lpfc_hba *phba = vport->phba;
3791
3792 /* FAN received */
3793 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3794 "0265 FAN received\n");
3795 icmd = &cmdiocb->iocb;
3796 did = icmd->un.elsreq64.remoteID;
3797 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
3798 lp = (uint32_t *)pcmd->virt;
3799
3800 cmd = *lp++;
3801 fp = (FAN *) lp;
3802 3778
3779 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
3780 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
3781 fp = (FAN *) ++lp;
3803 /* FAN received; Fan does not have a reply sequence */ 3782 /* FAN received; Fan does not have a reply sequence */
3804 3783 if ((vport == phba->pport) &&
3805 if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) { 3784 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
3806 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 3785 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
3807 sizeof(struct lpfc_name)) != 0) || 3786 sizeof(struct lpfc_name))) ||
3808 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 3787 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
3809 sizeof(struct lpfc_name)) != 0)) { 3788 sizeof(struct lpfc_name)))) {
3810 /* 3789 /* This port has switched fabrics. FLOGI is required */
3811 * This node has switched fabrics. FLOGI is required
3812 * Clean up the old rpi's
3813 */
3814
3815 list_for_each_entry_safe(ndlp, next_ndlp,
3816 &vport->fc_nodes, nlp_listp) {
3817 if (!NLP_CHK_NODE_ACT(ndlp))
3818 continue;
3819 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3820 continue;
3821 if (ndlp->nlp_type & NLP_FABRIC) {
3822 /*
3823 * Clean up old Fabric, Nameserver and
3824 * other NLP_FABRIC logins
3825 */
3826 lpfc_drop_node(vport, ndlp);
3827
3828 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
3829 /* Fail outstanding I/O now since this
3830 * device is marked for PLOGI
3831 */
3832 lpfc_unreg_rpi(vport, ndlp);
3833 }
3834 }
3835
3836 lpfc_initial_flogi(vport); 3790 lpfc_initial_flogi(vport);
3837 return 0; 3791 } else {
3838 } 3792 /* FAN verified - skip FLOGI */
3839 /* Discovery not needed, 3793 vport->fc_myDID = vport->fc_prevDID;
3840 * move the nodes to their original state. 3794 lpfc_issue_fabric_reglogin(vport);
3841 */
3842 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
3843 nlp_listp) {
3844 if (!NLP_CHK_NODE_ACT(ndlp))
3845 continue;
3846 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3847 continue;
3848
3849 switch (ndlp->nlp_prev_state) {
3850 case NLP_STE_UNMAPPED_NODE:
3851 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3852 lpfc_nlp_set_state(vport, ndlp,
3853 NLP_STE_UNMAPPED_NODE);
3854 break;
3855
3856 case NLP_STE_MAPPED_NODE:
3857 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
3858 lpfc_nlp_set_state(vport, ndlp,
3859 NLP_STE_MAPPED_NODE);
3860 break;
3861
3862 default:
3863 break;
3864 }
3865 } 3795 }
3866
3867 /* Start discovery - this should just do CLEAR_LA */
3868 lpfc_disc_start(vport);
3869 } 3796 }
3870 return 0; 3797 return 0;
3871} 3798}
@@ -3875,20 +3802,17 @@ lpfc_els_timeout(unsigned long ptr)
3875{ 3802{
3876 struct lpfc_vport *vport = (struct lpfc_vport *) ptr; 3803 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
3877 struct lpfc_hba *phba = vport->phba; 3804 struct lpfc_hba *phba = vport->phba;
3805 uint32_t tmo_posted;
3878 unsigned long iflag; 3806 unsigned long iflag;
3879 3807
3880 spin_lock_irqsave(&vport->work_port_lock, iflag); 3808 spin_lock_irqsave(&vport->work_port_lock, iflag);
3881 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) { 3809 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
3810 if (!tmo_posted)
3882 vport->work_port_events |= WORKER_ELS_TMO; 3811 vport->work_port_events |= WORKER_ELS_TMO;
3883 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 3812 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3884 3813
3885 spin_lock_irqsave(&phba->hbalock, iflag); 3814 if (!tmo_posted)
3886 if (phba->work_wait) 3815 lpfc_worker_wake_up(phba);
3887 lpfc_worker_wake_up(phba);
3888 spin_unlock_irqrestore(&phba->hbalock, iflag);
3889 }
3890 else
3891 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3892 return; 3816 return;
3893} 3817}
3894 3818
@@ -3933,9 +3857,6 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
3933 els_command == ELS_CMD_FDISC) 3857 els_command == ELS_CMD_FDISC)
3934 continue; 3858 continue;
3935 3859
3936 if (vport != piocb->vport)
3937 continue;
3938
3939 if (piocb->drvrTimeout > 0) { 3860 if (piocb->drvrTimeout > 0) {
3940 if (piocb->drvrTimeout >= timeout) 3861 if (piocb->drvrTimeout >= timeout)
3941 piocb->drvrTimeout -= timeout; 3862 piocb->drvrTimeout -= timeout;
@@ -4089,7 +4010,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4089 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 4010 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
4090 cmd = *payload; 4011 cmd = *payload;
4091 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 4012 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
4092 lpfc_post_buffer(phba, pring, 1, 1); 4013 lpfc_post_buffer(phba, pring, 1);
4093 4014
4094 did = icmd->un.rcvels.remoteID; 4015 did = icmd->un.rcvels.remoteID;
4095 if (icmd->ulpStatus) { 4016 if (icmd->ulpStatus) {
@@ -4398,7 +4319,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4398 phba->fc_stat.NoRcvBuf++; 4319 phba->fc_stat.NoRcvBuf++;
4399 /* Not enough posted buffers; Try posting more buffers */ 4320 /* Not enough posted buffers; Try posting more buffers */
4400 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 4321 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
4401 lpfc_post_buffer(phba, pring, 0, 1); 4322 lpfc_post_buffer(phba, pring, 0);
4402 return; 4323 return;
4403 } 4324 }
4404 4325
@@ -4842,18 +4763,16 @@ lpfc_fabric_block_timeout(unsigned long ptr)
4842 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 4763 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4843 unsigned long iflags; 4764 unsigned long iflags;
4844 uint32_t tmo_posted; 4765 uint32_t tmo_posted;
4766
4845 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 4767 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
4846 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 4768 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
4847 if (!tmo_posted) 4769 if (!tmo_posted)
4848 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 4770 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
4849 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 4771 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
4850 4772
4851 if (!tmo_posted) { 4773 if (!tmo_posted)
4852 spin_lock_irqsave(&phba->hbalock, iflags); 4774 lpfc_worker_wake_up(phba);
4853 if (phba->work_wait) 4775 return;
4854 lpfc_worker_wake_up(phba);
4855 spin_unlock_irqrestore(&phba->hbalock, iflags);
4856 }
4857} 4776}
4858 4777
4859static void 4778static void
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7cb68feb04fd..a98d11bf3576 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -153,11 +153,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
153 * count until this queued work is done 153 * count until this queued work is done
154 */ 154 */
155 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 155 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
156 evtp->evt = LPFC_EVT_DEV_LOSS; 156 if (evtp->evt_arg1) {
157 list_add_tail(&evtp->evt_listp, &phba->work_list); 157 evtp->evt = LPFC_EVT_DEV_LOSS;
158 if (phba->work_wait) 158 list_add_tail(&evtp->evt_listp, &phba->work_list);
159 wake_up(phba->work_wait); 159 lpfc_worker_wake_up(phba);
160 160 }
161 spin_unlock_irq(&phba->hbalock); 161 spin_unlock_irq(&phba->hbalock);
162 162
163 return; 163 return;
@@ -276,14 +276,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
276 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 276 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
277} 277}
278 278
279
280void
281lpfc_worker_wake_up(struct lpfc_hba *phba)
282{
283 wake_up(phba->work_wait);
284 return;
285}
286
287static void 279static void
288lpfc_work_list_done(struct lpfc_hba *phba) 280lpfc_work_list_done(struct lpfc_hba *phba)
289{ 281{
@@ -429,6 +421,8 @@ lpfc_work_done(struct lpfc_hba *phba)
429 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { 421 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
430 if (pring->flag & LPFC_STOP_IOCB_EVENT) { 422 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
431 pring->flag |= LPFC_DEFERRED_RING_EVENT; 423 pring->flag |= LPFC_DEFERRED_RING_EVENT;
424 /* Set the lpfc data pending flag */
425 set_bit(LPFC_DATA_READY, &phba->data_flags);
432 } else { 426 } else {
433 pring->flag &= ~LPFC_DEFERRED_RING_EVENT; 427 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
434 lpfc_sli_handle_slow_ring_event(phba, pring, 428 lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -459,69 +453,29 @@ lpfc_work_done(struct lpfc_hba *phba)
459 lpfc_work_list_done(phba); 453 lpfc_work_list_done(phba);
460} 454}
461 455
462static int
463check_work_wait_done(struct lpfc_hba *phba)
464{
465 struct lpfc_vport *vport;
466 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
467 int rc = 0;
468
469 spin_lock_irq(&phba->hbalock);
470 list_for_each_entry(vport, &phba->port_list, listentry) {
471 if (vport->work_port_events) {
472 rc = 1;
473 break;
474 }
475 }
476 if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
477 kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
478 rc = 1;
479 phba->work_found++;
480 } else
481 phba->work_found = 0;
482 spin_unlock_irq(&phba->hbalock);
483 return rc;
484}
485
486
487int 456int
488lpfc_do_work(void *p) 457lpfc_do_work(void *p)
489{ 458{
490 struct lpfc_hba *phba = p; 459 struct lpfc_hba *phba = p;
491 int rc; 460 int rc;
492 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
493 461
494 set_user_nice(current, -20); 462 set_user_nice(current, -20);
495 phba->work_wait = &work_waitq; 463 phba->data_flags = 0;
496 phba->work_found = 0;
497 464
498 while (1) { 465 while (1) {
499 466 /* wait and check worker queue activities */
500 rc = wait_event_interruptible(work_waitq, 467 rc = wait_event_interruptible(phba->work_waitq,
501 check_work_wait_done(phba)); 468 (test_and_clear_bit(LPFC_DATA_READY,
502 469 &phba->data_flags)
470 || kthread_should_stop()));
503 BUG_ON(rc); 471 BUG_ON(rc);
504 472
505 if (kthread_should_stop()) 473 if (kthread_should_stop())
506 break; 474 break;
507 475
476 /* Attend pending lpfc data processing */
508 lpfc_work_done(phba); 477 lpfc_work_done(phba);
509
510 /* If there is alot of slow ring work, like during link up
511 * check_work_wait_done() may cause this thread to not give
512 * up the CPU for very long periods of time. This may cause
513 * soft lockups or other problems. To avoid these situations
514 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
515 * consecutive iterations.
516 */
517 if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
518 phba->work_found = 0;
519 schedule();
520 }
521 } 478 }
522 spin_lock_irq(&phba->hbalock);
523 phba->work_wait = NULL;
524 spin_unlock_irq(&phba->hbalock);
525 return 0; 479 return 0;
526} 480}
527 481
@@ -551,10 +505,10 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
551 505
552 spin_lock_irqsave(&phba->hbalock, flags); 506 spin_lock_irqsave(&phba->hbalock, flags);
553 list_add_tail(&evtp->evt_listp, &phba->work_list); 507 list_add_tail(&evtp->evt_listp, &phba->work_list);
554 if (phba->work_wait)
555 lpfc_worker_wake_up(phba);
556 spin_unlock_irqrestore(&phba->hbalock, flags); 508 spin_unlock_irqrestore(&phba->hbalock, flags);
557 509
510 lpfc_worker_wake_up(phba);
511
558 return 1; 512 return 1;
559} 513}
560 514
@@ -963,6 +917,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
963 if (phba->fc_topology == TOPOLOGY_LOOP) { 917 if (phba->fc_topology == TOPOLOGY_LOOP) {
964 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 918 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
965 919
920 if (phba->cfg_enable_npiv)
921 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
922 "1309 Link Up Event npiv not supported in loop "
923 "topology\n");
966 /* Get Loop Map information */ 924 /* Get Loop Map information */
967 if (la->il) 925 if (la->il)
968 vport->fc_flag |= FC_LBIT; 926 vport->fc_flag |= FC_LBIT;
@@ -1087,6 +1045,8 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1087 MAILBOX_t *mb = &pmb->mb; 1045 MAILBOX_t *mb = &pmb->mb;
1088 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1046 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1089 1047
1048 /* Unblock ELS traffic */
1049 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1090 /* Check for error */ 1050 /* Check for error */
1091 if (mb->mbxStatus) { 1051 if (mb->mbxStatus) {
1092 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1052 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -1650,7 +1610,6 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1650 ndlp->nlp_DID, old_state, state); 1610 ndlp->nlp_DID, old_state, state);
1651 1611
1652 if (old_state == NLP_STE_NPR_NODE && 1612 if (old_state == NLP_STE_NPR_NODE &&
1653 (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
1654 state != NLP_STE_NPR_NODE) 1613 state != NLP_STE_NPR_NODE)
1655 lpfc_cancel_retry_delay_tmo(vport, ndlp); 1614 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1656 if (old_state == NLP_STE_UNMAPPED_NODE) { 1615 if (old_state == NLP_STE_UNMAPPED_NODE) {
@@ -1687,8 +1646,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1687{ 1646{
1688 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1647 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1689 1648
1690 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) 1649 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1691 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1692 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 1650 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1693 lpfc_nlp_counters(vport, ndlp->nlp_state, -1); 1651 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1694 spin_lock_irq(shost->host_lock); 1652 spin_lock_irq(shost->host_lock);
@@ -1701,8 +1659,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1701static void 1659static void
1702lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 1660lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1703{ 1661{
1704 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) 1662 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1705 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1706 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 1663 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1707 lpfc_nlp_counters(vport, ndlp->nlp_state, -1); 1664 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1708 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 1665 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
@@ -2121,10 +2078,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2121 ndlp->nlp_last_elscmd = 0; 2078 ndlp->nlp_last_elscmd = 0;
2122 del_timer_sync(&ndlp->nlp_delayfunc); 2079 del_timer_sync(&ndlp->nlp_delayfunc);
2123 2080
2124 if (!list_empty(&ndlp->els_retry_evt.evt_listp)) 2081 list_del_init(&ndlp->els_retry_evt.evt_listp);
2125 list_del_init(&ndlp->els_retry_evt.evt_listp); 2082 list_del_init(&ndlp->dev_loss_evt.evt_listp);
2126 if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
2127 list_del_init(&ndlp->dev_loss_evt.evt_listp);
2128 2083
2129 lpfc_unreg_rpi(vport, ndlp); 2084 lpfc_unreg_rpi(vport, ndlp);
2130 2085
@@ -2144,10 +2099,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2144 LPFC_MBOXQ_t *mbox; 2099 LPFC_MBOXQ_t *mbox;
2145 int rc; 2100 int rc;
2146 2101
2147 if (ndlp->nlp_flag & NLP_DELAY_TMO) { 2102 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2148 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2149 }
2150
2151 if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { 2103 if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
2152 /* For this case we need to cleanup the default rpi 2104 /* For this case we need to cleanup the default rpi
2153 * allocated by the firmware. 2105 * allocated by the firmware.
@@ -2317,8 +2269,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
2317 /* Since this node is marked for discovery, 2269 /* Since this node is marked for discovery,
2318 * delay timeout is not needed. 2270 * delay timeout is not needed.
2319 */ 2271 */
2320 if (ndlp->nlp_flag & NLP_DELAY_TMO) 2272 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2321 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2322 } else 2273 } else
2323 ndlp = NULL; 2274 ndlp = NULL;
2324 } else { 2275 } else {
@@ -2643,21 +2594,20 @@ lpfc_disc_timeout(unsigned long ptr)
2643{ 2594{
2644 struct lpfc_vport *vport = (struct lpfc_vport *) ptr; 2595 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
2645 struct lpfc_hba *phba = vport->phba; 2596 struct lpfc_hba *phba = vport->phba;
2597 uint32_t tmo_posted;
2646 unsigned long flags = 0; 2598 unsigned long flags = 0;
2647 2599
2648 if (unlikely(!phba)) 2600 if (unlikely(!phba))
2649 return; 2601 return;
2650 2602
2651 if ((vport->work_port_events & WORKER_DISC_TMO) == 0) { 2603 spin_lock_irqsave(&vport->work_port_lock, flags);
2652 spin_lock_irqsave(&vport->work_port_lock, flags); 2604 tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
2605 if (!tmo_posted)
2653 vport->work_port_events |= WORKER_DISC_TMO; 2606 vport->work_port_events |= WORKER_DISC_TMO;
2654 spin_unlock_irqrestore(&vport->work_port_lock, flags); 2607 spin_unlock_irqrestore(&vport->work_port_lock, flags);
2655 2608
2656 spin_lock_irqsave(&phba->hbalock, flags); 2609 if (!tmo_posted)
2657 if (phba->work_wait) 2610 lpfc_worker_wake_up(phba);
2658 lpfc_worker_wake_up(phba);
2659 spin_unlock_irqrestore(&phba->hbalock, flags);
2660 }
2661 return; 2611 return;
2662} 2612}
2663 2613
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index fa757b251f82..5b6e5395c8eb 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -145,8 +145,10 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
145 return -ERESTART; 145 return -ERESTART;
146 } 146 }
147 147
148 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) 148 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
149 mempool_free(pmb, phba->mbox_mem_pool);
149 return -EINVAL; 150 return -EINVAL;
151 }
150 152
151 /* Save information as VPD data */ 153 /* Save information as VPD data */
152 vp->rev.rBit = 1; 154 vp->rev.rBit = 1;
@@ -551,18 +553,18 @@ static void
551lpfc_hb_timeout(unsigned long ptr) 553lpfc_hb_timeout(unsigned long ptr)
552{ 554{
553 struct lpfc_hba *phba; 555 struct lpfc_hba *phba;
556 uint32_t tmo_posted;
554 unsigned long iflag; 557 unsigned long iflag;
555 558
556 phba = (struct lpfc_hba *)ptr; 559 phba = (struct lpfc_hba *)ptr;
557 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 560 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
558 if (!(phba->pport->work_port_events & WORKER_HB_TMO)) 561 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
562 if (!tmo_posted)
559 phba->pport->work_port_events |= WORKER_HB_TMO; 563 phba->pport->work_port_events |= WORKER_HB_TMO;
560 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 564 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
561 565
562 spin_lock_irqsave(&phba->hbalock, iflag); 566 if (!tmo_posted)
563 if (phba->work_wait) 567 lpfc_worker_wake_up(phba);
564 wake_up(phba->work_wait);
565 spin_unlock_irqrestore(&phba->hbalock, iflag);
566 return; 568 return;
567} 569}
568 570
@@ -851,6 +853,8 @@ lpfc_handle_latt(struct lpfc_hba *phba)
851 lpfc_read_la(phba, pmb, mp); 853 lpfc_read_la(phba, pmb, mp);
852 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 854 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
853 pmb->vport = vport; 855 pmb->vport = vport;
856 /* Block ELS IOCBs until we have processed this mbox command */
857 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
854 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 858 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
855 if (rc == MBX_NOT_FINISHED) { 859 if (rc == MBX_NOT_FINISHED) {
856 rc = 4; 860 rc = 4;
@@ -866,6 +870,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
866 return; 870 return;
867 871
868lpfc_handle_latt_free_mbuf: 872lpfc_handle_latt_free_mbuf:
873 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
869 lpfc_mbuf_free(phba, mp->virt, mp->phys); 874 lpfc_mbuf_free(phba, mp->virt, mp->phys);
870lpfc_handle_latt_free_mp: 875lpfc_handle_latt_free_mp:
871 kfree(mp); 876 kfree(mp);
@@ -1194,8 +1199,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1194/* Returns the number of buffers NOT posted. */ 1199/* Returns the number of buffers NOT posted. */
1195/**************************************************/ 1200/**************************************************/
1196int 1201int
1197lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt, 1202lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1198 int type)
1199{ 1203{
1200 IOCB_t *icmd; 1204 IOCB_t *icmd;
1201 struct lpfc_iocbq *iocb; 1205 struct lpfc_iocbq *iocb;
@@ -1295,7 +1299,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
1295 struct lpfc_sli *psli = &phba->sli; 1299 struct lpfc_sli *psli = &phba->sli;
1296 1300
1297 /* Ring 0, ELS / CT buffers */ 1301 /* Ring 0, ELS / CT buffers */
1298 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1); 1302 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1299 /* Ring 2 - FCP no buffers needed */ 1303 /* Ring 2 - FCP no buffers needed */
1300 1304
1301 return 0; 1305 return 0;
@@ -1454,6 +1458,15 @@ lpfc_cleanup(struct lpfc_vport *vport)
1454 1458
1455 lpfc_disc_state_machine(vport, ndlp, NULL, 1459 lpfc_disc_state_machine(vport, ndlp, NULL,
1456 NLP_EVT_DEVICE_RM); 1460 NLP_EVT_DEVICE_RM);
1461
1462 /* nlp_type zero is not defined, nlp_flag zero also not defined,
1463 * nlp_state is unused, this happens when
1464 * an initiator has logged
1465 * into us so cleanup this ndlp.
1466 */
1467 if ((ndlp->nlp_type == 0) && (ndlp->nlp_flag == 0) &&
1468 (ndlp->nlp_state == 0))
1469 lpfc_nlp_put(ndlp);
1457 } 1470 }
1458 1471
1459 /* At this point, ALL ndlp's should be gone 1472 /* At this point, ALL ndlp's should be gone
@@ -2101,6 +2114,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2101 phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT); 2114 phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
2102 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 2115 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
2103 2116
2117 /* Initialize the wait queue head for the kernel thread */
2118 init_waitqueue_head(&phba->work_waitq);
2119
2104 /* Startup the kernel thread for this host adapter. */ 2120 /* Startup the kernel thread for this host adapter. */
2105 phba->worker_thread = kthread_run(lpfc_do_work, phba, 2121 phba->worker_thread = kthread_run(lpfc_do_work, phba,
2106 "lpfc_worker_%d", phba->brd_no); 2122 "lpfc_worker_%d", phba->brd_no);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d08c4c890744..6688a8689b56 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -235,10 +235,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
235 (iocb->iocb_cmpl) (phba, iocb, iocb); 235 (iocb->iocb_cmpl) (phba, iocb, iocb);
236 } 236 }
237 } 237 }
238 238 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
239 /* If we are delaying issuing an ELS command, cancel it */
240 if (ndlp->nlp_flag & NLP_DELAY_TMO)
241 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
242 return 0; 239 return 0;
243} 240}
244 241
@@ -249,7 +246,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
249 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 246 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
250 struct lpfc_hba *phba = vport->phba; 247 struct lpfc_hba *phba = vport->phba;
251 struct lpfc_dmabuf *pcmd; 248 struct lpfc_dmabuf *pcmd;
252 struct lpfc_work_evt *evtp;
253 uint32_t *lp; 249 uint32_t *lp;
254 IOCB_t *icmd; 250 IOCB_t *icmd;
255 struct serv_parm *sp; 251 struct serv_parm *sp;
@@ -425,73 +421,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
425 ndlp, mbox); 421 ndlp, mbox);
426 return 1; 422 return 1;
427 } 423 }
428
429 /* If the remote NPort logs into us, before we can initiate
430 * discovery to them, cleanup the NPort from discovery accordingly.
431 */
432 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
433 spin_lock_irq(shost->host_lock);
434 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
435 spin_unlock_irq(shost->host_lock);
436 del_timer_sync(&ndlp->nlp_delayfunc);
437 ndlp->nlp_last_elscmd = 0;
438
439 if (!list_empty(&ndlp->els_retry_evt.evt_listp)) {
440 list_del_init(&ndlp->els_retry_evt.evt_listp);
441 /* Decrement ndlp reference count held for the
442 * delayed retry
443 */
444 evtp = &ndlp->els_retry_evt;
445 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
446 }
447
448 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
449 spin_lock_irq(shost->host_lock);
450 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
451 spin_unlock_irq(shost->host_lock);
452
453 if ((ndlp->nlp_flag & NLP_ADISC_SND) &&
454 (vport->num_disc_nodes)) {
455 /* Check to see if there are more
456 * ADISCs to be sent
457 */
458 lpfc_more_adisc(vport);
459
460 if ((vport->num_disc_nodes == 0) &&
461 (vport->fc_npr_cnt))
462 lpfc_els_disc_plogi(vport);
463
464 if (vport->num_disc_nodes == 0) {
465 spin_lock_irq(shost->host_lock);
466 vport->fc_flag &= ~FC_NDISC_ACTIVE;
467 spin_unlock_irq(shost->host_lock);
468 lpfc_can_disctmo(vport);
469 lpfc_end_rscn(vport);
470 }
471 }
472 }
473 } else if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
474 (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
475 (vport->num_disc_nodes)) {
476 spin_lock_irq(shost->host_lock);
477 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
478 spin_unlock_irq(shost->host_lock);
479 /* Check to see if there are more
480 * PLOGIs to be sent
481 */
482 lpfc_more_plogi(vport);
483 if (vport->num_disc_nodes == 0) {
484 spin_lock_irq(shost->host_lock);
485 vport->fc_flag &= ~FC_NDISC_ACTIVE;
486 spin_unlock_irq(shost->host_lock);
487 lpfc_can_disctmo(vport);
488 lpfc_end_rscn(vport);
489 }
490 }
491
492 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); 424 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
493 return 1; 425 return 1;
494
495out: 426out:
496 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 427 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
497 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; 428 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
@@ -574,7 +505,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
574 else 505 else
575 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 506 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
576 507
577 if (!(ndlp->nlp_type & NLP_FABRIC) || 508 if ((!(ndlp->nlp_type & NLP_FABRIC) &&
509 ((ndlp->nlp_type & NLP_FCP_TARGET) ||
510 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
578 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 511 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
579 /* Only try to re-login if this is NOT a Fabric Node */ 512 /* Only try to re-login if this is NOT a Fabric Node */
580 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 513 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
@@ -751,6 +684,7 @@ static uint32_t
751lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 684lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
752 void *arg, uint32_t evt) 685 void *arg, uint32_t evt)
753{ 686{
687 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
754 struct lpfc_hba *phba = vport->phba; 688 struct lpfc_hba *phba = vport->phba;
755 struct lpfc_iocbq *cmdiocb = arg; 689 struct lpfc_iocbq *cmdiocb = arg;
756 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 690 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -776,7 +710,22 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
776 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 710 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
777 NULL); 711 NULL);
778 } else { 712 } else {
779 lpfc_rcv_plogi(vport, ndlp, cmdiocb); 713 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
714 (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
715 (vport->num_disc_nodes)) {
716 spin_lock_irq(shost->host_lock);
717 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
718 spin_unlock_irq(shost->host_lock);
719 /* Check if there are more PLOGIs to be sent */
720 lpfc_more_plogi(vport);
721 if (vport->num_disc_nodes == 0) {
722 spin_lock_irq(shost->host_lock);
723 vport->fc_flag &= ~FC_NDISC_ACTIVE;
724 spin_unlock_irq(shost->host_lock);
725 lpfc_can_disctmo(vport);
726 lpfc_end_rscn(vport);
727 }
728 }
780 } /* If our portname was less */ 729 } /* If our portname was less */
781 730
782 return ndlp->nlp_state; 731 return ndlp->nlp_state;
@@ -1040,6 +989,7 @@ static uint32_t
1040lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 989lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1041 void *arg, uint32_t evt) 990 void *arg, uint32_t evt)
1042{ 991{
992 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1043 struct lpfc_hba *phba = vport->phba; 993 struct lpfc_hba *phba = vport->phba;
1044 struct lpfc_iocbq *cmdiocb; 994 struct lpfc_iocbq *cmdiocb;
1045 995
@@ -1048,9 +998,28 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1048 998
1049 cmdiocb = (struct lpfc_iocbq *) arg; 999 cmdiocb = (struct lpfc_iocbq *) arg;
1050 1000
1051 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) 1001 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1052 return ndlp->nlp_state; 1002 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1003 spin_lock_irq(shost->host_lock);
1004 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1005 spin_unlock_irq(shost->host_lock);
1053 1006
1007 if (vport->num_disc_nodes) {
1008 lpfc_more_adisc(vport);
1009 if ((vport->num_disc_nodes == 0) &&
1010 (vport->fc_npr_cnt))
1011 lpfc_els_disc_plogi(vport);
1012 if (vport->num_disc_nodes == 0) {
1013 spin_lock_irq(shost->host_lock);
1014 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1015 spin_unlock_irq(shost->host_lock);
1016 lpfc_can_disctmo(vport);
1017 lpfc_end_rscn(vport);
1018 }
1019 }
1020 }
1021 return ndlp->nlp_state;
1022 }
1054 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1023 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1055 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 1024 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1056 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 1025 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
@@ -1742,24 +1711,21 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1742 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1711 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1743 1712
1744 /* Ignore PLOGI if we have an outstanding LOGO */ 1713 /* Ignore PLOGI if we have an outstanding LOGO */
1745 if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) { 1714 if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
1746 return ndlp->nlp_state; 1715 return ndlp->nlp_state;
1747 }
1748
1749 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { 1716 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1717 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1750 spin_lock_irq(shost->host_lock); 1718 spin_lock_irq(shost->host_lock);
1751 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1719 ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
1752 spin_unlock_irq(shost->host_lock); 1720 spin_unlock_irq(shost->host_lock);
1753 return ndlp->nlp_state; 1721 } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
1754 } 1722 /* send PLOGI immediately, move to PLOGI issue state */
1755 1723 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1756 /* send PLOGI immediately, move to PLOGI issue state */ 1724 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1757 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1725 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1758 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1726 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1759 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 1727 }
1760 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1761 } 1728 }
1762
1763 return ndlp->nlp_state; 1729 return ndlp->nlp_state;
1764} 1730}
1765 1731
@@ -1810,7 +1776,6 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1810 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 1776 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1811 1777
1812 lpfc_rcv_padisc(vport, ndlp, cmdiocb); 1778 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1813
1814 /* 1779 /*
1815 * Do not start discovery if discovery is about to start 1780 * Do not start discovery if discovery is about to start
1816 * or discovery in progress for this node. Starting discovery 1781 * or discovery in progress for this node. Starting discovery
@@ -1973,9 +1938,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1973 spin_lock_irq(shost->host_lock); 1938 spin_lock_irq(shost->host_lock);
1974 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1939 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1975 spin_unlock_irq(shost->host_lock); 1940 spin_unlock_irq(shost->host_lock);
1976 if (ndlp->nlp_flag & NLP_DELAY_TMO) { 1941 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1977 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1978 }
1979 return ndlp->nlp_state; 1942 return ndlp->nlp_state;
1980} 1943}
1981 1944
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 0910a9ab76a5..c94da4f2b8a6 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -50,6 +50,7 @@ void
50lpfc_adjust_queue_depth(struct lpfc_hba *phba) 50lpfc_adjust_queue_depth(struct lpfc_hba *phba)
51{ 51{
52 unsigned long flags; 52 unsigned long flags;
53 uint32_t evt_posted;
53 54
54 spin_lock_irqsave(&phba->hbalock, flags); 55 spin_lock_irqsave(&phba->hbalock, flags);
55 atomic_inc(&phba->num_rsrc_err); 56 atomic_inc(&phba->num_rsrc_err);
@@ -65,17 +66,13 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
65 spin_unlock_irqrestore(&phba->hbalock, flags); 66 spin_unlock_irqrestore(&phba->hbalock, flags);
66 67
67 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 68 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
68 if ((phba->pport->work_port_events & 69 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
69 WORKER_RAMP_DOWN_QUEUE) == 0) { 70 if (!evt_posted)
70 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE; 71 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
71 }
72 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 72 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
73 73
74 spin_lock_irqsave(&phba->hbalock, flags); 74 if (!evt_posted)
75 if (phba->work_wait) 75 lpfc_worker_wake_up(phba);
76 wake_up(phba->work_wait);
77 spin_unlock_irqrestore(&phba->hbalock, flags);
78
79 return; 76 return;
80} 77}
81 78
@@ -89,6 +86,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
89{ 86{
90 unsigned long flags; 87 unsigned long flags;
91 struct lpfc_hba *phba = vport->phba; 88 struct lpfc_hba *phba = vport->phba;
89 uint32_t evt_posted;
92 atomic_inc(&phba->num_cmd_success); 90 atomic_inc(&phba->num_cmd_success);
93 91
94 if (vport->cfg_lun_queue_depth <= sdev->queue_depth) 92 if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
@@ -103,16 +101,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
103 spin_unlock_irqrestore(&phba->hbalock, flags); 101 spin_unlock_irqrestore(&phba->hbalock, flags);
104 102
105 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 103 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
106 if ((phba->pport->work_port_events & 104 evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
107 WORKER_RAMP_UP_QUEUE) == 0) { 105 if (!evt_posted)
108 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE; 106 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
109 }
110 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 107 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
111 108
112 spin_lock_irqsave(&phba->hbalock, flags); 109 if (!evt_posted)
113 if (phba->work_wait) 110 lpfc_worker_wake_up(phba);
114 wake_up(phba->work_wait); 111 return;
115 spin_unlock_irqrestore(&phba->hbalock, flags);
116} 112}
117 113
118void 114void
@@ -609,9 +605,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
609 result = cmd->result; 605 result = cmd->result;
610 sdev = cmd->device; 606 sdev = cmd->device;
611 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 607 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
612 spin_lock_irqsave(sdev->host->host_lock, flags);
613 lpfc_cmd->pCmd = NULL; /* This must be done before scsi_done */
614 spin_unlock_irqrestore(sdev->host->host_lock, flags);
615 cmd->scsi_done(cmd); 608 cmd->scsi_done(cmd);
616 609
617 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 610 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -620,6 +613,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
620 * wake up the thread. 613 * wake up the thread.
621 */ 614 */
622 spin_lock_irqsave(sdev->host->host_lock, flags); 615 spin_lock_irqsave(sdev->host->host_lock, flags);
616 lpfc_cmd->pCmd = NULL;
623 if (lpfc_cmd->waitq) 617 if (lpfc_cmd->waitq)
624 wake_up(lpfc_cmd->waitq); 618 wake_up(lpfc_cmd->waitq);
625 spin_unlock_irqrestore(sdev->host->host_lock, flags); 619 spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -690,6 +684,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
690 * wake up the thread. 684 * wake up the thread.
691 */ 685 */
692 spin_lock_irqsave(sdev->host->host_lock, flags); 686 spin_lock_irqsave(sdev->host->host_lock, flags);
687 lpfc_cmd->pCmd = NULL;
693 if (lpfc_cmd->waitq) 688 if (lpfc_cmd->waitq)
694 wake_up(lpfc_cmd->waitq); 689 wake_up(lpfc_cmd->waitq);
695 spin_unlock_irqrestore(sdev->host->host_lock, flags); 690 spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -849,14 +844,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
849 struct lpfc_iocbq *iocbq; 844 struct lpfc_iocbq *iocbq;
850 struct lpfc_iocbq *iocbqrsp; 845 struct lpfc_iocbq *iocbqrsp;
851 int ret; 846 int ret;
847 int status;
852 848
853 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) 849 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
854 return FAILED; 850 return FAILED;
855 851
856 lpfc_cmd->rdata = rdata; 852 lpfc_cmd->rdata = rdata;
857 ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, 853 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
858 FCP_TARGET_RESET); 854 FCP_TARGET_RESET);
859 if (!ret) 855 if (!status)
860 return FAILED; 856 return FAILED;
861 857
862 iocbq = &lpfc_cmd->cur_iocbq; 858 iocbq = &lpfc_cmd->cur_iocbq;
@@ -869,12 +865,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
869 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 865 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
870 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", 866 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
871 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); 867 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
872 ret = lpfc_sli_issue_iocb_wait(phba, 868 status = lpfc_sli_issue_iocb_wait(phba,
873 &phba->sli.ring[phba->sli.fcp_ring], 869 &phba->sli.ring[phba->sli.fcp_ring],
874 iocbq, iocbqrsp, lpfc_cmd->timeout); 870 iocbq, iocbqrsp, lpfc_cmd->timeout);
875 if (ret != IOCB_SUCCESS) { 871 if (status != IOCB_SUCCESS) {
876 if (ret == IOCB_TIMEDOUT) 872 if (status == IOCB_TIMEDOUT) {
877 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 873 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
874 ret = TIMEOUT_ERROR;
875 } else
876 ret = FAILED;
878 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 877 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
879 } else { 878 } else {
880 ret = SUCCESS; 879 ret = SUCCESS;
@@ -1142,121 +1141,96 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1142 struct lpfc_iocbq *iocbq, *iocbqrsp; 1141 struct lpfc_iocbq *iocbq, *iocbqrsp;
1143 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 1142 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1144 struct lpfc_nodelist *pnode = rdata->pnode; 1143 struct lpfc_nodelist *pnode = rdata->pnode;
1145 uint32_t cmd_result = 0, cmd_status = 0; 1144 unsigned long later;
1146 int ret = FAILED; 1145 int ret = SUCCESS;
1147 int iocb_status = IOCB_SUCCESS; 1146 int status;
1148 int cnt, loopcnt; 1147 int cnt;
1149 1148
1150 lpfc_block_error_handler(cmnd); 1149 lpfc_block_error_handler(cmnd);
1151 loopcnt = 0;
1152 /* 1150 /*
1153 * If target is not in a MAPPED state, delay the reset until 1151 * If target is not in a MAPPED state, delay the reset until
1154 * target is rediscovered or devloss timeout expires. 1152 * target is rediscovered or devloss timeout expires.
1155 */ 1153 */
1156 while (1) { 1154 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1155 while (time_after(later, jiffies)) {
1157 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 1156 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
1158 goto out; 1157 return FAILED;
1159
1160 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1161 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1162 loopcnt++;
1163 rdata = cmnd->device->hostdata;
1164 if (!rdata ||
1165 (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){
1166 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1167 "0721 LUN Reset rport "
1168 "failure: cnt x%x rdata x%p\n",
1169 loopcnt, rdata);
1170 goto out;
1171 }
1172 pnode = rdata->pnode;
1173 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
1174 goto out;
1175 }
1176 if (pnode->nlp_state == NLP_STE_MAPPED_NODE) 1158 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
1177 break; 1159 break;
1160 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1161 rdata = cmnd->device->hostdata;
1162 if (!rdata)
1163 break;
1164 pnode = rdata->pnode;
1165 }
1166 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1167 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1168 "0721 LUN Reset rport "
1169 "failure: msec x%x rdata x%p\n",
1170 jiffies_to_msecs(jiffies - later), rdata);
1171 return FAILED;
1178 } 1172 }
1179
1180 lpfc_cmd = lpfc_get_scsi_buf(phba); 1173 lpfc_cmd = lpfc_get_scsi_buf(phba);
1181 if (lpfc_cmd == NULL) 1174 if (lpfc_cmd == NULL)
1182 goto out; 1175 return FAILED;
1183
1184 lpfc_cmd->timeout = 60; 1176 lpfc_cmd->timeout = 60;
1185 lpfc_cmd->rdata = rdata; 1177 lpfc_cmd->rdata = rdata;
1186 1178
1187 ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun, 1179 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
1188 FCP_TARGET_RESET); 1180 cmnd->device->lun,
1189 if (!ret) 1181 FCP_TARGET_RESET);
1190 goto out_free_scsi_buf; 1182 if (!status) {
1191 1183 lpfc_release_scsi_buf(phba, lpfc_cmd);
1184 return FAILED;
1185 }
1192 iocbq = &lpfc_cmd->cur_iocbq; 1186 iocbq = &lpfc_cmd->cur_iocbq;
1193 1187
1194 /* get a buffer for this IOCB command response */ 1188 /* get a buffer for this IOCB command response */
1195 iocbqrsp = lpfc_sli_get_iocbq(phba); 1189 iocbqrsp = lpfc_sli_get_iocbq(phba);
1196 if (iocbqrsp == NULL) 1190 if (iocbqrsp == NULL) {
1197 goto out_free_scsi_buf; 1191 lpfc_release_scsi_buf(phba, lpfc_cmd);
1198 1192 return FAILED;
1193 }
1199 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 1194 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1200 "0703 Issue target reset to TGT %d LUN %d " 1195 "0703 Issue target reset to TGT %d LUN %d "
1201 "rpi x%x nlp_flag x%x\n", cmnd->device->id, 1196 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
1202 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 1197 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
1203 iocb_status = lpfc_sli_issue_iocb_wait(phba, 1198 status = lpfc_sli_issue_iocb_wait(phba,
1204 &phba->sli.ring[phba->sli.fcp_ring], 1199 &phba->sli.ring[phba->sli.fcp_ring],
1205 iocbq, iocbqrsp, lpfc_cmd->timeout); 1200 iocbq, iocbqrsp, lpfc_cmd->timeout);
1206 1201 if (status == IOCB_TIMEDOUT) {
1207 if (iocb_status == IOCB_TIMEDOUT)
1208 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 1202 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
1209 1203 ret = TIMEOUT_ERROR;
1210 if (iocb_status == IOCB_SUCCESS) 1204 } else {
1211 ret = SUCCESS; 1205 if (status != IOCB_SUCCESS)
1212 else 1206 ret = FAILED;
1213 ret = iocb_status; 1207 lpfc_release_scsi_buf(phba, lpfc_cmd);
1214 1208 }
1215 cmd_result = iocbqrsp->iocb.un.ulpWord[4]; 1209 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1216 cmd_status = iocbqrsp->iocb.ulpStatus; 1210 "0713 SCSI layer issued device reset (%d, %d) "
1217 1211 "return x%x status x%x result x%x\n",
1212 cmnd->device->id, cmnd->device->lun, ret,
1213 iocbqrsp->iocb.ulpStatus,
1214 iocbqrsp->iocb.un.ulpWord[4]);
1218 lpfc_sli_release_iocbq(phba, iocbqrsp); 1215 lpfc_sli_release_iocbq(phba, iocbqrsp);
1219
1220 /*
1221 * All outstanding txcmplq I/Os should have been aborted by the device.
1222 * Unfortunately, some targets do not abide by this forcing the driver
1223 * to double check.
1224 */
1225 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun, 1216 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
1226 LPFC_CTX_LUN); 1217 LPFC_CTX_TGT);
1227 if (cnt) 1218 if (cnt)
1228 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 1219 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
1229 cmnd->device->id, cmnd->device->lun, 1220 cmnd->device->id, cmnd->device->lun,
1230 LPFC_CTX_LUN); 1221 LPFC_CTX_TGT);
1231 loopcnt = 0; 1222 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1232 while(cnt) { 1223 while (time_after(later, jiffies) && cnt) {
1233 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); 1224 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
1234
1235 if (++loopcnt
1236 > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
1237 break;
1238
1239 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, 1225 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
1240 cmnd->device->lun, LPFC_CTX_LUN); 1226 cmnd->device->lun, LPFC_CTX_TGT);
1241 } 1227 }
1242
1243 if (cnt) { 1228 if (cnt) {
1244 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 1229 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1245 "0719 device reset I/O flush failure: " 1230 "0719 device reset I/O flush failure: "
1246 "cnt x%x\n", cnt); 1231 "cnt x%x\n", cnt);
1247 ret = FAILED; 1232 ret = FAILED;
1248 } 1233 }
1249
1250out_free_scsi_buf:
1251 if (iocb_status != IOCB_TIMEDOUT) {
1252 lpfc_release_scsi_buf(phba, lpfc_cmd);
1253 }
1254 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1255 "0713 SCSI layer issued device reset (%d, %d) "
1256 "return x%x status x%x result x%x\n",
1257 cmnd->device->id, cmnd->device->lun, ret,
1258 cmd_status, cmd_result);
1259out:
1260 return ret; 1234 return ret;
1261} 1235}
1262 1236
@@ -1268,19 +1242,12 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1268 struct lpfc_hba *phba = vport->phba; 1242 struct lpfc_hba *phba = vport->phba;
1269 struct lpfc_nodelist *ndlp = NULL; 1243 struct lpfc_nodelist *ndlp = NULL;
1270 int match; 1244 int match;
1271 int ret = FAILED, i, err_count = 0; 1245 int ret = SUCCESS, status, i;
1272 int cnt, loopcnt; 1246 int cnt;
1273 struct lpfc_scsi_buf * lpfc_cmd; 1247 struct lpfc_scsi_buf * lpfc_cmd;
1248 unsigned long later;
1274 1249
1275 lpfc_block_error_handler(cmnd); 1250 lpfc_block_error_handler(cmnd);
1276
1277 lpfc_cmd = lpfc_get_scsi_buf(phba);
1278 if (lpfc_cmd == NULL)
1279 goto out;
1280
1281 /* The lpfc_cmd storage is reused. Set all loop invariants. */
1282 lpfc_cmd->timeout = 60;
1283
1284 /* 1251 /*
1285 * Since the driver manages a single bus device, reset all 1252 * Since the driver manages a single bus device, reset all
1286 * targets known to the driver. Should any target reset 1253 * targets known to the driver. Should any target reset
@@ -1294,7 +1261,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1294 if (!NLP_CHK_NODE_ACT(ndlp)) 1261 if (!NLP_CHK_NODE_ACT(ndlp))
1295 continue; 1262 continue;
1296 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 1263 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
1297 i == ndlp->nlp_sid && 1264 ndlp->nlp_sid == i &&
1298 ndlp->rport) { 1265 ndlp->rport) {
1299 match = 1; 1266 match = 1;
1300 break; 1267 break;
@@ -1303,27 +1270,22 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1303 spin_unlock_irq(shost->host_lock); 1270 spin_unlock_irq(shost->host_lock);
1304 if (!match) 1271 if (!match)
1305 continue; 1272 continue;
1306 1273 lpfc_cmd = lpfc_get_scsi_buf(phba);
1307 ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i, 1274 if (lpfc_cmd) {
1308 cmnd->device->lun, 1275 lpfc_cmd->timeout = 60;
1309 ndlp->rport->dd_data); 1276 status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
1310 if (ret != SUCCESS) { 1277 cmnd->device->lun,
1278 ndlp->rport->dd_data);
1279 if (status != TIMEOUT_ERROR)
1280 lpfc_release_scsi_buf(phba, lpfc_cmd);
1281 }
1282 if (!lpfc_cmd || status != SUCCESS) {
1311 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 1283 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1312 "0700 Bus Reset on target %d failed\n", 1284 "0700 Bus Reset on target %d failed\n",
1313 i); 1285 i);
1314 err_count++; 1286 ret = FAILED;
1315 break;
1316 } 1287 }
1317 } 1288 }
1318
1319 if (ret != IOCB_TIMEDOUT)
1320 lpfc_release_scsi_buf(phba, lpfc_cmd);
1321
1322 if (err_count == 0)
1323 ret = SUCCESS;
1324 else
1325 ret = FAILED;
1326
1327 /* 1289 /*
1328 * All outstanding txcmplq I/Os should have been aborted by 1290 * All outstanding txcmplq I/Os should have been aborted by
1329 * the targets. Unfortunately, some targets do not abide by 1291 * the targets. Unfortunately, some targets do not abide by
@@ -1333,27 +1295,19 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1333 if (cnt) 1295 if (cnt)
1334 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 1296 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
1335 0, 0, LPFC_CTX_HOST); 1297 0, 0, LPFC_CTX_HOST);
1336 loopcnt = 0; 1298 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1337 while(cnt) { 1299 while (time_after(later, jiffies) && cnt) {
1338 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); 1300 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
1339
1340 if (++loopcnt
1341 > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
1342 break;
1343
1344 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST); 1301 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
1345 } 1302 }
1346
1347 if (cnt) { 1303 if (cnt) {
1348 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 1304 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1349 "0715 Bus Reset I/O flush failure: " 1305 "0715 Bus Reset I/O flush failure: "
1350 "cnt x%x left x%x\n", cnt, i); 1306 "cnt x%x left x%x\n", cnt, i);
1351 ret = FAILED; 1307 ret = FAILED;
1352 } 1308 }
1353
1354 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 1309 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1355 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); 1310 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
1356out:
1357 return ret; 1311 return ret;
1358} 1312}
1359 1313
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 70a0a9eab211..f40aa7b905f7 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -324,9 +324,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
324 phba->work_ha |= HA_ERATT; 324 phba->work_ha |= HA_ERATT;
325 phba->work_hs = HS_FFER3; 325 phba->work_hs = HS_FFER3;
326 326
327 /* hbalock should already be held */ 327 lpfc_worker_wake_up(phba);
328 if (phba->work_wait)
329 lpfc_worker_wake_up(phba);
330 328
331 return NULL; 329 return NULL;
332 } 330 }
@@ -1309,9 +1307,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1309 phba->work_ha |= HA_ERATT; 1307 phba->work_ha |= HA_ERATT;
1310 phba->work_hs = HS_FFER3; 1308 phba->work_hs = HS_FFER3;
1311 1309
1312 /* hbalock should already be held */ 1310 lpfc_worker_wake_up(phba);
1313 if (phba->work_wait)
1314 lpfc_worker_wake_up(phba);
1315 1311
1316 return; 1312 return;
1317} 1313}
@@ -2611,12 +2607,9 @@ lpfc_mbox_timeout(unsigned long ptr)
2611 phba->pport->work_port_events |= WORKER_MBOX_TMO; 2607 phba->pport->work_port_events |= WORKER_MBOX_TMO;
2612 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 2608 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2613 2609
2614 if (!tmo_posted) { 2610 if (!tmo_posted)
2615 spin_lock_irqsave(&phba->hbalock, iflag); 2611 lpfc_worker_wake_up(phba);
2616 if (phba->work_wait) 2612 return;
2617 lpfc_worker_wake_up(phba);
2618 spin_unlock_irqrestore(&phba->hbalock, iflag);
2619 }
2620} 2613}
2621 2614
2622void 2615void
@@ -3374,8 +3367,12 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
3374 for (i = 0; i < psli->num_rings; i++) { 3367 for (i = 0; i < psli->num_rings; i++) {
3375 pring = &psli->ring[i]; 3368 pring = &psli->ring[i];
3376 prev_pring_flag = pring->flag; 3369 prev_pring_flag = pring->flag;
3377 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */ 3370 /* Only slow rings */
3371 if (pring->ringno == LPFC_ELS_RING) {
3378 pring->flag |= LPFC_DEFERRED_RING_EVENT; 3372 pring->flag |= LPFC_DEFERRED_RING_EVENT;
3373 /* Set the lpfc data pending flag */
3374 set_bit(LPFC_DATA_READY, &phba->data_flags);
3375 }
3379 /* 3376 /*
3380 * Error everything on the txq since these iocbs have not been 3377 * Error everything on the txq since these iocbs have not been
3381 * given to the FW yet. 3378 * given to the FW yet.
@@ -3434,8 +3431,12 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
3434 spin_lock_irqsave(&phba->hbalock, flags); 3431 spin_lock_irqsave(&phba->hbalock, flags);
3435 for (i = 0; i < psli->num_rings; i++) { 3432 for (i = 0; i < psli->num_rings; i++) {
3436 pring = &psli->ring[i]; 3433 pring = &psli->ring[i];
3437 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */ 3434 /* Only slow rings */
3435 if (pring->ringno == LPFC_ELS_RING) {
3438 pring->flag |= LPFC_DEFERRED_RING_EVENT; 3436 pring->flag |= LPFC_DEFERRED_RING_EVENT;
3437 /* Set the lpfc data pending flag */
3438 set_bit(LPFC_DATA_READY, &phba->data_flags);
3439 }
3439 3440
3440 /* 3441 /*
3441 * Error everything on the txq since these iocbs have not been 3442 * Error everything on the txq since these iocbs have not been
@@ -3762,7 +3763,6 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
3762 lpfc_ctx_cmd ctx_cmd) 3763 lpfc_ctx_cmd ctx_cmd)
3763{ 3764{
3764 struct lpfc_scsi_buf *lpfc_cmd; 3765 struct lpfc_scsi_buf *lpfc_cmd;
3765 struct scsi_cmnd *cmnd;
3766 int rc = 1; 3766 int rc = 1;
3767 3767
3768 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 3768 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
@@ -3772,19 +3772,20 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
3772 return rc; 3772 return rc;
3773 3773
3774 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 3774 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
3775 cmnd = lpfc_cmd->pCmd;
3776 3775
3777 if (cmnd == NULL) 3776 if (lpfc_cmd->pCmd == NULL)
3778 return rc; 3777 return rc;
3779 3778
3780 switch (ctx_cmd) { 3779 switch (ctx_cmd) {
3781 case LPFC_CTX_LUN: 3780 case LPFC_CTX_LUN:
3782 if ((cmnd->device->id == tgt_id) && 3781 if ((lpfc_cmd->rdata->pnode) &&
3783 (cmnd->device->lun == lun_id)) 3782 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
3783 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
3784 rc = 0; 3784 rc = 0;
3785 break; 3785 break;
3786 case LPFC_CTX_TGT: 3786 case LPFC_CTX_TGT:
3787 if (cmnd->device->id == tgt_id) 3787 if ((lpfc_cmd->rdata->pnode) &&
3788 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
3788 rc = 0; 3789 rc = 0;
3789 break; 3790 break;
3790 case LPFC_CTX_HOST: 3791 case LPFC_CTX_HOST:
@@ -3994,6 +3995,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3994 if (pmboxq->context1) 3995 if (pmboxq->context1)
3995 return MBX_NOT_FINISHED; 3996 return MBX_NOT_FINISHED;
3996 3997
3998 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
3997 /* setup wake call as IOCB callback */ 3999 /* setup wake call as IOCB callback */
3998 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 4000 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
3999 /* setup context field to pass wait_queue pointer to wake function */ 4001 /* setup context field to pass wait_queue pointer to wake function */
@@ -4159,7 +4161,7 @@ lpfc_intr_handler(int irq, void *dev_id)
4159 "pwork:x%x hawork:x%x wait:x%x", 4161 "pwork:x%x hawork:x%x wait:x%x",
4160 phba->work_ha, work_ha_copy, 4162 phba->work_ha, work_ha_copy,
4161 (uint32_t)((unsigned long) 4163 (uint32_t)((unsigned long)
4162 phba->work_wait)); 4164 &phba->work_waitq));
4163 4165
4164 control &= 4166 control &=
4165 ~(HC_R0INT_ENA << LPFC_ELS_RING); 4167 ~(HC_R0INT_ENA << LPFC_ELS_RING);
@@ -4172,7 +4174,7 @@ lpfc_intr_handler(int irq, void *dev_id)
4172 "x%x hawork:x%x wait:x%x", 4174 "x%x hawork:x%x wait:x%x",
4173 phba->work_ha, work_ha_copy, 4175 phba->work_ha, work_ha_copy,
4174 (uint32_t)((unsigned long) 4176 (uint32_t)((unsigned long)
4175 phba->work_wait)); 4177 &phba->work_waitq));
4176 } 4178 }
4177 spin_unlock(&phba->hbalock); 4179 spin_unlock(&phba->hbalock);
4178 } 4180 }
@@ -4297,9 +4299,8 @@ send_current_mbox:
4297 4299
4298 spin_lock(&phba->hbalock); 4300 spin_lock(&phba->hbalock);
4299 phba->work_ha |= work_ha_copy; 4301 phba->work_ha |= work_ha_copy;
4300 if (phba->work_wait)
4301 lpfc_worker_wake_up(phba);
4302 spin_unlock(&phba->hbalock); 4302 spin_unlock(&phba->hbalock);
4303 lpfc_worker_wake_up(phba);
4303 } 4304 }
4304 4305
4305 ha_copy &= ~(phba->work_ha_mask); 4306 ha_copy &= ~(phba->work_ha_mask);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index b22b893019f4..ad24cacfbe10 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.2.6" 21#define LPFC_DRIVER_VERSION "8.2.7"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 6feaf59b0b1b..109f89d98830 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -216,6 +216,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
216 int vpi; 216 int vpi;
217 int rc = VPORT_ERROR; 217 int rc = VPORT_ERROR;
218 int status; 218 int status;
219 int size;
219 220
220 if ((phba->sli_rev < 3) || 221 if ((phba->sli_rev < 3) ||
221 !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { 222 !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
@@ -278,7 +279,20 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
278 279
279 memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8); 280 memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
280 memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8); 281 memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
281 282 size = strnlen(fc_vport->symbolic_name, LPFC_VNAME_LEN);
283 if (size) {
284 vport->vname = kzalloc(size+1, GFP_KERNEL);
285 if (!vport->vname) {
286 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
287 "1814 Create VPORT failed. "
288 "vname allocation failed.\n");
289 rc = VPORT_ERROR;
290 lpfc_free_vpi(phba, vpi);
291 destroy_port(vport);
292 goto error_out;
293 }
294 memcpy(vport->vname, fc_vport->symbolic_name, size+1);
295 }
282 if (fc_vport->node_name != 0) 296 if (fc_vport->node_name != 0)
283 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn); 297 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
284 if (fc_vport->port_name != 0) 298 if (fc_vport->port_name != 0)
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index fd63b06d9ef1..11aa917629ac 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1765,7 +1765,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
1765 default: 1765 default:
1766 return 0; 1766 return 0;
1767 } 1767 }
1768 if (mesg.event == mdev->ofdev.dev.power.power_state.event) 1768 if (ms->phase == sleeping)
1769 return 0; 1769 return 0;
1770 1770
1771 scsi_block_requests(ms->host); 1771 scsi_block_requests(ms->host);
@@ -1780,8 +1780,6 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
1780 disable_irq(ms->meshintr); 1780 disable_irq(ms->meshintr);
1781 set_mesh_power(ms, 0); 1781 set_mesh_power(ms, 0);
1782 1782
1783 mdev->ofdev.dev.power.power_state = mesg;
1784
1785 return 0; 1783 return 0;
1786} 1784}
1787 1785
@@ -1790,7 +1788,7 @@ static int mesh_resume(struct macio_dev *mdev)
1790 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); 1788 struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
1791 unsigned long flags; 1789 unsigned long flags;
1792 1790
1793 if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON) 1791 if (ms->phase != sleeping)
1794 return 0; 1792 return 0;
1795 1793
1796 set_mesh_power(ms, 1); 1794 set_mesh_power(ms, 1);
@@ -1801,8 +1799,6 @@ static int mesh_resume(struct macio_dev *mdev)
1801 enable_irq(ms->meshintr); 1799 enable_irq(ms->meshintr);
1802 scsi_unblock_requests(ms->host); 1800 scsi_unblock_requests(ms->host);
1803 1801
1804 mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
1805
1806 return 0; 1802 return 0;
1807} 1803}
1808 1804
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 0c786944d2c2..5822dd595826 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -113,9 +113,6 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
113 .host_param_mask = ISCSI_HOST_HWADDRESS | 113 .host_param_mask = ISCSI_HOST_HWADDRESS |
114 ISCSI_HOST_IPADDRESS | 114 ISCSI_HOST_IPADDRESS |
115 ISCSI_HOST_INITIATOR_NAME, 115 ISCSI_HOST_INITIATOR_NAME,
116 .sessiondata_size = sizeof(struct ddb_entry),
117 .host_template = &qla4xxx_driver_template,
118
119 .tgt_dscvr = qla4xxx_tgt_dscvr, 116 .tgt_dscvr = qla4xxx_tgt_dscvr,
120 .get_conn_param = qla4xxx_conn_get_param, 117 .get_conn_param = qla4xxx_conn_get_param,
121 .get_session_param = qla4xxx_sess_get_param, 118 .get_session_param = qla4xxx_sess_get_param,
@@ -275,7 +272,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
275 return err; 272 return err;
276 } 273 }
277 274
278 ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0); 275 ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0, 0);
279 if (!ddb_entry->conn) { 276 if (!ddb_entry->conn) {
280 iscsi_remove_session(ddb_entry->sess); 277 iscsi_remove_session(ddb_entry->sess);
281 DEBUG2(printk(KERN_ERR "Could not add connection.\n")); 278 DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
@@ -292,7 +289,8 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
292 struct ddb_entry *ddb_entry; 289 struct ddb_entry *ddb_entry;
293 struct iscsi_cls_session *sess; 290 struct iscsi_cls_session *sess;
294 291
295 sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport); 292 sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport,
293 sizeof(struct ddb_entry));
296 if (!sess) 294 if (!sess)
297 return NULL; 295 return NULL;
298 296
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 110e776d1a07..36c92f961e15 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -855,9 +855,18 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
855 855
856 good_bytes = scsi_bufflen(cmd); 856 good_bytes = scsi_bufflen(cmd);
857 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { 857 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
858 int old_good_bytes = good_bytes;
858 drv = scsi_cmd_to_driver(cmd); 859 drv = scsi_cmd_to_driver(cmd);
859 if (drv->done) 860 if (drv->done)
860 good_bytes = drv->done(cmd); 861 good_bytes = drv->done(cmd);
862 /*
863 * USB may not give sense identifying bad sector and
864 * simply return a residue instead, so subtract off the
865 * residue if drv->done() error processing indicates no
866 * change to the completion length.
867 */
868 if (good_bytes == old_good_bytes)
869 good_bytes -= scsi_get_resid(cmd);
861 } 870 }
862 scsi_io_completion(cmd, good_bytes); 871 scsi_io_completion(cmd, good_bytes);
863} 872}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index f6600bfb5bde..01d11a01ffbf 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -94,6 +94,7 @@ static const char * scsi_debug_version_date = "20070104";
94#define DEF_VIRTUAL_GB 0 94#define DEF_VIRTUAL_GB 0
95#define DEF_FAKE_RW 0 95#define DEF_FAKE_RW 0
96#define DEF_VPD_USE_HOSTNO 1 96#define DEF_VPD_USE_HOSTNO 1
97#define DEF_SECTOR_SIZE 512
97 98
98/* bit mask values for scsi_debug_opts */ 99/* bit mask values for scsi_debug_opts */
99#define SCSI_DEBUG_OPT_NOISE 1 100#define SCSI_DEBUG_OPT_NOISE 1
@@ -142,6 +143,7 @@ static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
142static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB; 143static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
143static int scsi_debug_fake_rw = DEF_FAKE_RW; 144static int scsi_debug_fake_rw = DEF_FAKE_RW;
144static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; 145static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
146static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
145 147
146static int scsi_debug_cmnd_count = 0; 148static int scsi_debug_cmnd_count = 0;
147 149
@@ -157,11 +159,6 @@ static int sdebug_heads; /* heads per disk */
157static int sdebug_cylinders_per; /* cylinders per surface */ 159static int sdebug_cylinders_per; /* cylinders per surface */
158static int sdebug_sectors_per; /* sectors per cylinder */ 160static int sdebug_sectors_per; /* sectors per cylinder */
159 161
160/* default sector size is 512 bytes, 2**9 bytes */
161#define POW2_SECT_SIZE 9
162#define SECT_SIZE (1 << POW2_SECT_SIZE)
163#define SECT_SIZE_PER(TGT) SECT_SIZE
164
165#define SDEBUG_MAX_PARTS 4 162#define SDEBUG_MAX_PARTS 4
166 163
167#define SDEBUG_SENSE_LEN 32 164#define SDEBUG_SENSE_LEN 32
@@ -646,6 +643,14 @@ static int inquiry_evpd_b0(unsigned char * arr)
646 return sizeof(vpdb0_data); 643 return sizeof(vpdb0_data);
647} 644}
648 645
646static int inquiry_evpd_b1(unsigned char *arr)
647{
648 memset(arr, 0, 0x3c);
649 arr[0] = 0;
650 arr[1] = 1;
651
652 return 0x3c;
653}
649 654
650#define SDEBUG_LONG_INQ_SZ 96 655#define SDEBUG_LONG_INQ_SZ 96
651#define SDEBUG_MAX_INQ_ARR_SZ 584 656#define SDEBUG_MAX_INQ_ARR_SZ 584
@@ -701,6 +706,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
701 arr[n++] = 0x88; /* SCSI ports */ 706 arr[n++] = 0x88; /* SCSI ports */
702 arr[n++] = 0x89; /* ATA information */ 707 arr[n++] = 0x89; /* ATA information */
703 arr[n++] = 0xb0; /* Block limits (SBC) */ 708 arr[n++] = 0xb0; /* Block limits (SBC) */
709 arr[n++] = 0xb1; /* Block characteristics (SBC) */
704 arr[3] = n - 4; /* number of supported VPD pages */ 710 arr[3] = n - 4; /* number of supported VPD pages */
705 } else if (0x80 == cmd[2]) { /* unit serial number */ 711 } else if (0x80 == cmd[2]) { /* unit serial number */
706 arr[1] = cmd[2]; /*sanity */ 712 arr[1] = cmd[2]; /*sanity */
@@ -740,6 +746,9 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
740 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */ 746 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
741 arr[1] = cmd[2]; /*sanity */ 747 arr[1] = cmd[2]; /*sanity */
742 arr[3] = inquiry_evpd_b0(&arr[4]); 748 arr[3] = inquiry_evpd_b0(&arr[4]);
749 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
750 arr[1] = cmd[2]; /*sanity */
751 arr[3] = inquiry_evpd_b1(&arr[4]);
743 } else { 752 } else {
744 /* Illegal request, invalid field in cdb */ 753 /* Illegal request, invalid field in cdb */
745 mk_sense_buffer(devip, ILLEGAL_REQUEST, 754 mk_sense_buffer(devip, ILLEGAL_REQUEST,
@@ -878,8 +887,8 @@ static int resp_readcap(struct scsi_cmnd * scp,
878 arr[2] = 0xff; 887 arr[2] = 0xff;
879 arr[3] = 0xff; 888 arr[3] = 0xff;
880 } 889 }
881 arr[6] = (SECT_SIZE_PER(target) >> 8) & 0xff; 890 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
882 arr[7] = SECT_SIZE_PER(target) & 0xff; 891 arr[7] = scsi_debug_sector_size & 0xff;
883 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ); 892 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
884} 893}
885 894
@@ -902,10 +911,10 @@ static int resp_readcap16(struct scsi_cmnd * scp,
902 capac = sdebug_capacity - 1; 911 capac = sdebug_capacity - 1;
903 for (k = 0; k < 8; ++k, capac >>= 8) 912 for (k = 0; k < 8; ++k, capac >>= 8)
904 arr[7 - k] = capac & 0xff; 913 arr[7 - k] = capac & 0xff;
905 arr[8] = (SECT_SIZE_PER(target) >> 24) & 0xff; 914 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
906 arr[9] = (SECT_SIZE_PER(target) >> 16) & 0xff; 915 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
907 arr[10] = (SECT_SIZE_PER(target) >> 8) & 0xff; 916 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
908 arr[11] = SECT_SIZE_PER(target) & 0xff; 917 arr[11] = scsi_debug_sector_size & 0xff;
909 return fill_from_dev_buffer(scp, arr, 918 return fill_from_dev_buffer(scp, arr,
910 min(alloc_len, SDEBUG_READCAP16_ARR_SZ)); 919 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
911} 920}
@@ -1019,20 +1028,20 @@ static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1019 1028
1020static int resp_format_pg(unsigned char * p, int pcontrol, int target) 1029static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1021{ /* Format device page for mode_sense */ 1030{ /* Format device page for mode_sense */
1022 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0, 1031 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1023 0, 0, 0, 0, 0, 0, 0, 0, 1032 0, 0, 0, 0, 0, 0, 0, 0,
1024 0, 0, 0, 0, 0x40, 0, 0, 0}; 1033 0, 0, 0, 0, 0x40, 0, 0, 0};
1025 1034
1026 memcpy(p, format_pg, sizeof(format_pg)); 1035 memcpy(p, format_pg, sizeof(format_pg));
1027 p[10] = (sdebug_sectors_per >> 8) & 0xff; 1036 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1028 p[11] = sdebug_sectors_per & 0xff; 1037 p[11] = sdebug_sectors_per & 0xff;
1029 p[12] = (SECT_SIZE >> 8) & 0xff; 1038 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1030 p[13] = SECT_SIZE & 0xff; 1039 p[13] = scsi_debug_sector_size & 0xff;
1031 if (DEV_REMOVEABLE(target)) 1040 if (DEV_REMOVEABLE(target))
1032 p[20] |= 0x20; /* should agree with INQUIRY */ 1041 p[20] |= 0x20; /* should agree with INQUIRY */
1033 if (1 == pcontrol) 1042 if (1 == pcontrol)
1034 memset(p + 2, 0, sizeof(format_pg) - 2); 1043 memset(p + 2, 0, sizeof(format_pg) - 2);
1035 return sizeof(format_pg); 1044 return sizeof(format_pg);
1036} 1045}
1037 1046
1038static int resp_caching_pg(unsigned char * p, int pcontrol, int target) 1047static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
@@ -1206,8 +1215,8 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1206 ap[2] = (sdebug_capacity >> 8) & 0xff; 1215 ap[2] = (sdebug_capacity >> 8) & 0xff;
1207 ap[3] = sdebug_capacity & 0xff; 1216 ap[3] = sdebug_capacity & 0xff;
1208 } 1217 }
1209 ap[6] = (SECT_SIZE_PER(target) >> 8) & 0xff; 1218 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1210 ap[7] = SECT_SIZE_PER(target) & 0xff; 1219 ap[7] = scsi_debug_sector_size & 0xff;
1211 offset += bd_len; 1220 offset += bd_len;
1212 ap = arr + offset; 1221 ap = arr + offset;
1213 } else if (16 == bd_len) { 1222 } else if (16 == bd_len) {
@@ -1215,10 +1224,10 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1215 1224
1216 for (k = 0; k < 8; ++k, capac >>= 8) 1225 for (k = 0; k < 8; ++k, capac >>= 8)
1217 ap[7 - k] = capac & 0xff; 1226 ap[7 - k] = capac & 0xff;
1218 ap[12] = (SECT_SIZE_PER(target) >> 24) & 0xff; 1227 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1219 ap[13] = (SECT_SIZE_PER(target) >> 16) & 0xff; 1228 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1220 ap[14] = (SECT_SIZE_PER(target) >> 8) & 0xff; 1229 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1221 ap[15] = SECT_SIZE_PER(target) & 0xff; 1230 ap[15] = scsi_debug_sector_size & 0xff;
1222 offset += bd_len; 1231 offset += bd_len;
1223 ap = arr + offset; 1232 ap = arr + offset;
1224 } 1233 }
@@ -1519,10 +1528,10 @@ static int do_device_access(struct scsi_cmnd *scmd,
1519 if (block + num > sdebug_store_sectors) 1528 if (block + num > sdebug_store_sectors)
1520 rest = block + num - sdebug_store_sectors; 1529 rest = block + num - sdebug_store_sectors;
1521 1530
1522 ret = func(scmd, fake_storep + (block * SECT_SIZE), 1531 ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1523 (num - rest) * SECT_SIZE); 1532 (num - rest) * scsi_debug_sector_size);
1524 if (!ret && rest) 1533 if (!ret && rest)
1525 ret = func(scmd, fake_storep, rest * SECT_SIZE); 1534 ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
1526 1535
1527 return ret; 1536 return ret;
1528} 1537}
@@ -1575,10 +1584,10 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
1575 write_unlock_irqrestore(&atomic_rw, iflags); 1584 write_unlock_irqrestore(&atomic_rw, iflags);
1576 if (-1 == ret) 1585 if (-1 == ret)
1577 return (DID_ERROR << 16); 1586 return (DID_ERROR << 16);
1578 else if ((ret < (num * SECT_SIZE)) && 1587 else if ((ret < (num * scsi_debug_sector_size)) &&
1579 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 1588 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1580 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, " 1589 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
1581 " IO sent=%d bytes\n", num * SECT_SIZE, ret); 1590 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
1582 return 0; 1591 return 0;
1583} 1592}
1584 1593
@@ -2085,6 +2094,7 @@ module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2085module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR); 2094module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2086module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int, 2095module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2087 S_IRUGO | S_IWUSR); 2096 S_IRUGO | S_IWUSR);
2097module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2088 2098
2089MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 2099MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2090MODULE_DESCRIPTION("SCSI debug adapter driver"); 2100MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2106,6 +2116,7 @@ MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2106MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); 2116MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2107MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); 2117MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2108MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); 2118MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2119MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)");
2109 2120
2110 2121
2111static char sdebug_info[256]; 2122static char sdebug_info[256];
@@ -2158,8 +2169,9 @@ static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **sta
2158 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth, 2169 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2159 scsi_debug_cmnd_count, scsi_debug_delay, 2170 scsi_debug_cmnd_count, scsi_debug_delay,
2160 scsi_debug_max_luns, scsi_debug_scsi_level, 2171 scsi_debug_max_luns, scsi_debug_scsi_level,
2161 SECT_SIZE, sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per, 2172 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2162 num_aborts, num_dev_resets, num_bus_resets, num_host_resets); 2173 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2174 num_host_resets);
2163 if (pos < offset) { 2175 if (pos < offset) {
2164 len = 0; 2176 len = 0;
2165 begin = pos; 2177 begin = pos;
@@ -2434,6 +2446,12 @@ static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
2434DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show, 2446DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
2435 sdebug_vpd_use_hostno_store); 2447 sdebug_vpd_use_hostno_store);
2436 2448
2449static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
2450{
2451 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
2452}
2453DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
2454
2437/* Note: The following function creates attribute files in the 2455/* Note: The following function creates attribute files in the
2438 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these 2456 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
2439 files (over those found in the /sys/module/scsi_debug/parameters 2457 files (over those found in the /sys/module/scsi_debug/parameters
@@ -2459,11 +2477,13 @@ static int do_create_driverfs_files(void)
2459 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2477 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
2460 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); 2478 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
2461 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); 2479 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
2480 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
2462 return ret; 2481 return ret;
2463} 2482}
2464 2483
2465static void do_remove_driverfs_files(void) 2484static void do_remove_driverfs_files(void)
2466{ 2485{
2486 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
2467 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); 2487 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
2468 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); 2488 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
2469 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2489 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
@@ -2499,10 +2519,22 @@ static int __init scsi_debug_init(void)
2499 int k; 2519 int k;
2500 int ret; 2520 int ret;
2501 2521
2522 switch (scsi_debug_sector_size) {
2523 case 512:
2524 case 1024:
2525 case 2048:
2526 case 4096:
2527 break;
2528 default:
2529 printk(KERN_ERR "scsi_debug_init: invalid sector_size %u\n",
2530 scsi_debug_sector_size);
2531 return -EINVAL;
2532 }
2533
2502 if (scsi_debug_dev_size_mb < 1) 2534 if (scsi_debug_dev_size_mb < 1)
2503 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ 2535 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
2504 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576; 2536 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
2505 sdebug_store_sectors = sz / SECT_SIZE; 2537 sdebug_store_sectors = sz / scsi_debug_sector_size;
2506 sdebug_capacity = get_sdebug_capacity(); 2538 sdebug_capacity = get_sdebug_capacity();
2507 2539
2508 /* play around with geometry, don't waste too much on track 0 */ 2540 /* play around with geometry, don't waste too much on track 0 */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index eaf5a8add1ba..006a95916f72 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -298,6 +298,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
298 */ 298 */
299static int scsi_check_sense(struct scsi_cmnd *scmd) 299static int scsi_check_sense(struct scsi_cmnd *scmd)
300{ 300{
301 struct scsi_device *sdev = scmd->device;
301 struct scsi_sense_hdr sshdr; 302 struct scsi_sense_hdr sshdr;
302 303
303 if (! scsi_command_normalize_sense(scmd, &sshdr)) 304 if (! scsi_command_normalize_sense(scmd, &sshdr))
@@ -306,6 +307,16 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
306 if (scsi_sense_is_deferred(&sshdr)) 307 if (scsi_sense_is_deferred(&sshdr))
307 return NEEDS_RETRY; 308 return NEEDS_RETRY;
308 309
310 if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
311 sdev->scsi_dh_data->scsi_dh->check_sense) {
312 int rc;
313
314 rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
315 if (rc != SCSI_RETURN_NOT_HANDLED)
316 return rc;
317 /* handler does not care. Drop down to default handling */
318 }
319
309 /* 320 /*
310 * Previous logic looked for FILEMARK, EOM or ILI which are 321 * Previous logic looked for FILEMARK, EOM or ILI which are
311 * mainly associated with tapes and returned SUCCESS. 322 * mainly associated with tapes and returned SUCCESS.
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index cbf55d59a54c..88d1b5f44e59 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -65,7 +65,7 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
65}; 65};
66#undef SP 66#undef SP
67 67
68static struct kmem_cache *scsi_bidi_sdb_cache; 68static struct kmem_cache *scsi_sdb_cache;
69 69
70static void scsi_run_queue(struct request_queue *q); 70static void scsi_run_queue(struct request_queue *q);
71 71
@@ -784,7 +784,7 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
784 struct scsi_data_buffer *bidi_sdb = 784 struct scsi_data_buffer *bidi_sdb =
785 cmd->request->next_rq->special; 785 cmd->request->next_rq->special;
786 scsi_free_sgtable(bidi_sdb); 786 scsi_free_sgtable(bidi_sdb);
787 kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb); 787 kmem_cache_free(scsi_sdb_cache, bidi_sdb);
788 cmd->request->next_rq->special = NULL; 788 cmd->request->next_rq->special = NULL;
789 } 789 }
790} 790}
@@ -1059,7 +1059,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1059 1059
1060 if (blk_bidi_rq(cmd->request)) { 1060 if (blk_bidi_rq(cmd->request)) {
1061 struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc( 1061 struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
1062 scsi_bidi_sdb_cache, GFP_ATOMIC); 1062 scsi_sdb_cache, GFP_ATOMIC);
1063 if (!bidi_sdb) { 1063 if (!bidi_sdb) {
1064 error = BLKPREP_DEFER; 1064 error = BLKPREP_DEFER;
1065 goto err_exit; 1065 goto err_exit;
@@ -1169,6 +1169,14 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1169 1169
1170 if (ret != BLKPREP_OK) 1170 if (ret != BLKPREP_OK)
1171 return ret; 1171 return ret;
1172
1173 if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1174 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1175 ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1176 if (ret != BLKPREP_OK)
1177 return ret;
1178 }
1179
1172 /* 1180 /*
1173 * Filesystem requests must transfer data. 1181 * Filesystem requests must transfer data.
1174 */ 1182 */
@@ -1329,7 +1337,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1329 printk("scsi%d unblocking host at zero depth\n", 1337 printk("scsi%d unblocking host at zero depth\n",
1330 shost->host_no)); 1338 shost->host_no));
1331 } else { 1339 } else {
1332 blk_plug_device(q);
1333 return 0; 1340 return 0;
1334 } 1341 }
1335 } 1342 }
@@ -1693,11 +1700,11 @@ int __init scsi_init_queue(void)
1693 return -ENOMEM; 1700 return -ENOMEM;
1694 } 1701 }
1695 1702
1696 scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb", 1703 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1697 sizeof(struct scsi_data_buffer), 1704 sizeof(struct scsi_data_buffer),
1698 0, 0, NULL); 1705 0, 0, NULL);
1699 if (!scsi_bidi_sdb_cache) { 1706 if (!scsi_sdb_cache) {
1700 printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n"); 1707 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1701 goto cleanup_io_context; 1708 goto cleanup_io_context;
1702 } 1709 }
1703 1710
@@ -1710,7 +1717,7 @@ int __init scsi_init_queue(void)
1710 if (!sgp->slab) { 1717 if (!sgp->slab) {
1711 printk(KERN_ERR "SCSI: can't init sg slab %s\n", 1718 printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1712 sgp->name); 1719 sgp->name);
1713 goto cleanup_bidi_sdb; 1720 goto cleanup_sdb;
1714 } 1721 }
1715 1722
1716 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, 1723 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
@@ -1718,13 +1725,13 @@ int __init scsi_init_queue(void)
1718 if (!sgp->pool) { 1725 if (!sgp->pool) {
1719 printk(KERN_ERR "SCSI: can't init sg mempool %s\n", 1726 printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1720 sgp->name); 1727 sgp->name);
1721 goto cleanup_bidi_sdb; 1728 goto cleanup_sdb;
1722 } 1729 }
1723 } 1730 }
1724 1731
1725 return 0; 1732 return 0;
1726 1733
1727cleanup_bidi_sdb: 1734cleanup_sdb:
1728 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1735 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1729 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1736 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1730 if (sgp->pool) 1737 if (sgp->pool)
@@ -1732,7 +1739,7 @@ cleanup_bidi_sdb:
1732 if (sgp->slab) 1739 if (sgp->slab)
1733 kmem_cache_destroy(sgp->slab); 1740 kmem_cache_destroy(sgp->slab);
1734 } 1741 }
1735 kmem_cache_destroy(scsi_bidi_sdb_cache); 1742 kmem_cache_destroy(scsi_sdb_cache);
1736cleanup_io_context: 1743cleanup_io_context:
1737 kmem_cache_destroy(scsi_io_context_cache); 1744 kmem_cache_destroy(scsi_io_context_cache);
1738 1745
@@ -1744,7 +1751,7 @@ void scsi_exit_queue(void)
1744 int i; 1751 int i;
1745 1752
1746 kmem_cache_destroy(scsi_io_context_cache); 1753 kmem_cache_destroy(scsi_io_context_cache);
1747 kmem_cache_destroy(scsi_bidi_sdb_cache); 1754 kmem_cache_destroy(scsi_sdb_cache);
1748 1755
1749 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1756 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1750 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1757 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index a00eee6f7be9..196fe3af0d5e 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -346,7 +346,7 @@ static void scsi_target_dev_release(struct device *dev)
346 put_device(parent); 346 put_device(parent);
347} 347}
348 348
349struct device_type scsi_target_type = { 349static struct device_type scsi_target_type = {
350 .name = "scsi_target", 350 .name = "scsi_target",
351 .release = scsi_target_dev_release, 351 .release = scsi_target_dev_release,
352}; 352};
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 93d2b6714453..b6e561059779 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -439,6 +439,7 @@ struct bus_type scsi_bus_type = {
439 .resume = scsi_bus_resume, 439 .resume = scsi_bus_resume,
440 .remove = scsi_bus_remove, 440 .remove = scsi_bus_remove,
441}; 441};
442EXPORT_SYMBOL_GPL(scsi_bus_type);
442 443
443int scsi_sysfs_register(void) 444int scsi_sysfs_register(void)
444{ 445{
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 65d1737eb664..3af7cbcc5c5d 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -30,10 +30,11 @@
30#include <scsi/scsi_transport_iscsi.h> 30#include <scsi/scsi_transport_iscsi.h>
31#include <scsi/iscsi_if.h> 31#include <scsi/iscsi_if.h>
32 32
33#define ISCSI_SESSION_ATTRS 19 33#define ISCSI_SESSION_ATTRS 21
34#define ISCSI_CONN_ATTRS 13 34#define ISCSI_CONN_ATTRS 13
35#define ISCSI_HOST_ATTRS 4 35#define ISCSI_HOST_ATTRS 4
36#define ISCSI_TRANSPORT_VERSION "2.0-869" 36
37#define ISCSI_TRANSPORT_VERSION "2.0-870"
37 38
38struct iscsi_internal { 39struct iscsi_internal {
39 int daemon_pid; 40 int daemon_pid;
@@ -101,16 +102,10 @@ show_transport_##name(struct device *dev, \
101static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL); 102static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
102 103
103show_transport_attr(caps, "0x%x"); 104show_transport_attr(caps, "0x%x");
104show_transport_attr(max_lun, "%d");
105show_transport_attr(max_conn, "%d");
106show_transport_attr(max_cmd_len, "%d");
107 105
108static struct attribute *iscsi_transport_attrs[] = { 106static struct attribute *iscsi_transport_attrs[] = {
109 &dev_attr_handle.attr, 107 &dev_attr_handle.attr,
110 &dev_attr_caps.attr, 108 &dev_attr_caps.attr,
111 &dev_attr_max_lun.attr,
112 &dev_attr_max_conn.attr,
113 &dev_attr_max_cmd_len.attr,
114 NULL, 109 NULL,
115}; 110};
116 111
@@ -118,18 +113,139 @@ static struct attribute_group iscsi_transport_group = {
118 .attrs = iscsi_transport_attrs, 113 .attrs = iscsi_transport_attrs,
119}; 114};
120 115
116/*
117 * iSCSI endpoint attrs
118 */
119#define iscsi_dev_to_endpoint(_dev) \
120 container_of(_dev, struct iscsi_endpoint, dev)
121
122#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \
123struct device_attribute dev_attr_##_prefix##_##_name = \
124 __ATTR(_name,_mode,_show,_store)
125
126static void iscsi_endpoint_release(struct device *dev)
127{
128 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
129 kfree(ep);
130}
131
132static struct class iscsi_endpoint_class = {
133 .name = "iscsi_endpoint",
134 .dev_release = iscsi_endpoint_release,
135};
136
137static ssize_t
138show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
139{
140 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
141 return sprintf(buf, "%u\n", ep->id);
142}
143static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
144
145static struct attribute *iscsi_endpoint_attrs[] = {
146 &dev_attr_ep_handle.attr,
147 NULL,
148};
149
150static struct attribute_group iscsi_endpoint_group = {
151 .attrs = iscsi_endpoint_attrs,
152};
121 153
154#define ISCSI_MAX_EPID -1
155
156static int iscsi_match_epid(struct device *dev, void *data)
157{
158 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
159 unsigned int *epid = (unsigned int *) data;
160
161 return *epid == ep->id;
162}
163
164struct iscsi_endpoint *
165iscsi_create_endpoint(int dd_size)
166{
167 struct device *dev;
168 struct iscsi_endpoint *ep;
169 unsigned int id;
170 int err;
171
172 for (id = 1; id < ISCSI_MAX_EPID; id++) {
173 dev = class_find_device(&iscsi_endpoint_class, &id,
174 iscsi_match_epid);
175 if (!dev)
176 break;
177 }
178 if (id == ISCSI_MAX_EPID) {
179 printk(KERN_ERR "Too many connections. Max supported %u\n",
180 ISCSI_MAX_EPID - 1);
181 return NULL;
182 }
183
184 ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
185 if (!ep)
186 return NULL;
187
188 ep->id = id;
189 ep->dev.class = &iscsi_endpoint_class;
190 snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
191 err = device_register(&ep->dev);
192 if (err)
193 goto free_ep;
194
195 err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
196 if (err)
197 goto unregister_dev;
198
199 if (dd_size)
200 ep->dd_data = &ep[1];
201 return ep;
202
203unregister_dev:
204 device_unregister(&ep->dev);
205 return NULL;
206
207free_ep:
208 kfree(ep);
209 return NULL;
210}
211EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
212
213void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
214{
215 sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
216 device_unregister(&ep->dev);
217}
218EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
219
220struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
221{
222 struct iscsi_endpoint *ep;
223 struct device *dev;
224
225 dev = class_find_device(&iscsi_endpoint_class, &handle,
226 iscsi_match_epid);
227 if (!dev)
228 return NULL;
229
230 ep = iscsi_dev_to_endpoint(dev);
231 /*
232 * we can drop this now because the interface will prevent
233 * removals and lookups from racing.
234 */
235 put_device(dev);
236 return ep;
237}
238EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
122 239
123static int iscsi_setup_host(struct transport_container *tc, struct device *dev, 240static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
124 struct device *cdev) 241 struct device *cdev)
125{ 242{
126 struct Scsi_Host *shost = dev_to_shost(dev); 243 struct Scsi_Host *shost = dev_to_shost(dev);
127 struct iscsi_host *ihost = shost->shost_data; 244 struct iscsi_cls_host *ihost = shost->shost_data;
128 245
129 memset(ihost, 0, sizeof(*ihost)); 246 memset(ihost, 0, sizeof(*ihost));
130 INIT_LIST_HEAD(&ihost->sessions);
131 mutex_init(&ihost->mutex);
132 atomic_set(&ihost->nr_scans, 0); 247 atomic_set(&ihost->nr_scans, 0);
248 mutex_init(&ihost->mutex);
133 249
134 snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d", 250 snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
135 shost->host_no); 251 shost->host_no);
@@ -144,7 +260,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
144 struct device *cdev) 260 struct device *cdev)
145{ 261{
146 struct Scsi_Host *shost = dev_to_shost(dev); 262 struct Scsi_Host *shost = dev_to_shost(dev);
147 struct iscsi_host *ihost = shost->shost_data; 263 struct iscsi_cls_host *ihost = shost->shost_data;
148 264
149 destroy_workqueue(ihost->scan_workq); 265 destroy_workqueue(ihost->scan_workq);
150 return 0; 266 return 0;
@@ -287,6 +403,24 @@ static int iscsi_is_session_dev(const struct device *dev)
287 return dev->release == iscsi_session_release; 403 return dev->release == iscsi_session_release;
288} 404}
289 405
406static int iscsi_iter_session_fn(struct device *dev, void *data)
407{
408 void (* fn) (struct iscsi_cls_session *) = data;
409
410 if (!iscsi_is_session_dev(dev))
411 return 0;
412 fn(iscsi_dev_to_session(dev));
413 return 0;
414}
415
416void iscsi_host_for_each_session(struct Scsi_Host *shost,
417 void (*fn)(struct iscsi_cls_session *))
418{
419 device_for_each_child(&shost->shost_gendev, fn,
420 iscsi_iter_session_fn);
421}
422EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
423
290/** 424/**
291 * iscsi_scan_finished - helper to report when running scans are done 425 * iscsi_scan_finished - helper to report when running scans are done
292 * @shost: scsi host 426 * @shost: scsi host
@@ -297,7 +431,7 @@ static int iscsi_is_session_dev(const struct device *dev)
297 */ 431 */
298int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time) 432int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
299{ 433{
300 struct iscsi_host *ihost = shost->shost_data; 434 struct iscsi_cls_host *ihost = shost->shost_data;
301 /* 435 /*
302 * qla4xxx will have kicked off some session unblocks before calling 436 * qla4xxx will have kicked off some session unblocks before calling
303 * scsi_scan_host, so just wait for them to complete. 437 * scsi_scan_host, so just wait for them to complete.
@@ -306,42 +440,76 @@ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
306} 440}
307EXPORT_SYMBOL_GPL(iscsi_scan_finished); 441EXPORT_SYMBOL_GPL(iscsi_scan_finished);
308 442
309static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, 443struct iscsi_scan_data {
310 uint id, uint lun) 444 unsigned int channel;
445 unsigned int id;
446 unsigned int lun;
447};
448
449static int iscsi_user_scan_session(struct device *dev, void *data)
311{ 450{
312 struct iscsi_host *ihost = shost->shost_data; 451 struct iscsi_scan_data *scan_data = data;
313 struct iscsi_cls_session *session; 452 struct iscsi_cls_session *session;
453 struct Scsi_Host *shost;
454 struct iscsi_cls_host *ihost;
455 unsigned long flags;
456 unsigned int id;
457
458 if (!iscsi_is_session_dev(dev))
459 return 0;
460
461 session = iscsi_dev_to_session(dev);
462 shost = iscsi_session_to_shost(session);
463 ihost = shost->shost_data;
314 464
315 mutex_lock(&ihost->mutex); 465 mutex_lock(&ihost->mutex);
316 list_for_each_entry(session, &ihost->sessions, host_list) { 466 spin_lock_irqsave(&session->lock, flags);
317 if ((channel == SCAN_WILD_CARD || channel == 0) && 467 if (session->state != ISCSI_SESSION_LOGGED_IN) {
318 (id == SCAN_WILD_CARD || id == session->target_id)) 468 spin_unlock_irqrestore(&session->lock, flags);
319 scsi_scan_target(&session->dev, 0, 469 mutex_unlock(&ihost->mutex);
320 session->target_id, lun, 1); 470 return 0;
321 } 471 }
322 mutex_unlock(&ihost->mutex); 472 id = session->target_id;
473 spin_unlock_irqrestore(&session->lock, flags);
323 474
475 if (id != ISCSI_MAX_TARGET) {
476 if ((scan_data->channel == SCAN_WILD_CARD ||
477 scan_data->channel == 0) &&
478 (scan_data->id == SCAN_WILD_CARD ||
479 scan_data->id == id))
480 scsi_scan_target(&session->dev, 0, id,
481 scan_data->lun, 1);
482 }
483 mutex_unlock(&ihost->mutex);
324 return 0; 484 return 0;
325} 485}
326 486
487static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
488 uint id, uint lun)
489{
490 struct iscsi_scan_data scan_data;
491
492 scan_data.channel = channel;
493 scan_data.id = id;
494 scan_data.lun = lun;
495
496 return device_for_each_child(&shost->shost_gendev, &scan_data,
497 iscsi_user_scan_session);
498}
499
327static void iscsi_scan_session(struct work_struct *work) 500static void iscsi_scan_session(struct work_struct *work)
328{ 501{
329 struct iscsi_cls_session *session = 502 struct iscsi_cls_session *session =
330 container_of(work, struct iscsi_cls_session, scan_work); 503 container_of(work, struct iscsi_cls_session, scan_work);
331 struct Scsi_Host *shost = iscsi_session_to_shost(session); 504 struct Scsi_Host *shost = iscsi_session_to_shost(session);
332 struct iscsi_host *ihost = shost->shost_data; 505 struct iscsi_cls_host *ihost = shost->shost_data;
333 unsigned long flags; 506 struct iscsi_scan_data scan_data;
334 507
335 spin_lock_irqsave(&session->lock, flags); 508 scan_data.channel = 0;
336 if (session->state != ISCSI_SESSION_LOGGED_IN) { 509 scan_data.id = SCAN_WILD_CARD;
337 spin_unlock_irqrestore(&session->lock, flags); 510 scan_data.lun = SCAN_WILD_CARD;
338 goto done;
339 }
340 spin_unlock_irqrestore(&session->lock, flags);
341 511
342 scsi_scan_target(&session->dev, 0, session->target_id, 512 iscsi_user_scan_session(&session->dev, &scan_data);
343 SCAN_WILD_CARD, 1);
344done:
345 atomic_dec(&ihost->nr_scans); 513 atomic_dec(&ihost->nr_scans);
346} 514}
347 515
@@ -381,7 +549,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
381 container_of(work, struct iscsi_cls_session, 549 container_of(work, struct iscsi_cls_session,
382 unblock_work); 550 unblock_work);
383 struct Scsi_Host *shost = iscsi_session_to_shost(session); 551 struct Scsi_Host *shost = iscsi_session_to_shost(session);
384 struct iscsi_host *ihost = shost->shost_data; 552 struct iscsi_cls_host *ihost = shost->shost_data;
385 unsigned long flags; 553 unsigned long flags;
386 554
387 /* 555 /*
@@ -449,15 +617,19 @@ static void __iscsi_unbind_session(struct work_struct *work)
449 container_of(work, struct iscsi_cls_session, 617 container_of(work, struct iscsi_cls_session,
450 unbind_work); 618 unbind_work);
451 struct Scsi_Host *shost = iscsi_session_to_shost(session); 619 struct Scsi_Host *shost = iscsi_session_to_shost(session);
452 struct iscsi_host *ihost = shost->shost_data; 620 struct iscsi_cls_host *ihost = shost->shost_data;
621 unsigned long flags;
453 622
454 /* Prevent new scans and make sure scanning is not in progress */ 623 /* Prevent new scans and make sure scanning is not in progress */
455 mutex_lock(&ihost->mutex); 624 mutex_lock(&ihost->mutex);
456 if (list_empty(&session->host_list)) { 625 spin_lock_irqsave(&session->lock, flags);
626 if (session->target_id == ISCSI_MAX_TARGET) {
627 spin_unlock_irqrestore(&session->lock, flags);
457 mutex_unlock(&ihost->mutex); 628 mutex_unlock(&ihost->mutex);
458 return; 629 return;
459 } 630 }
460 list_del_init(&session->host_list); 631 session->target_id = ISCSI_MAX_TARGET;
632 spin_unlock_irqrestore(&session->lock, flags);
461 mutex_unlock(&ihost->mutex); 633 mutex_unlock(&ihost->mutex);
462 634
463 scsi_remove_target(&session->dev); 635 scsi_remove_target(&session->dev);
@@ -467,18 +639,18 @@ static void __iscsi_unbind_session(struct work_struct *work)
467static int iscsi_unbind_session(struct iscsi_cls_session *session) 639static int iscsi_unbind_session(struct iscsi_cls_session *session)
468{ 640{
469 struct Scsi_Host *shost = iscsi_session_to_shost(session); 641 struct Scsi_Host *shost = iscsi_session_to_shost(session);
470 struct iscsi_host *ihost = shost->shost_data; 642 struct iscsi_cls_host *ihost = shost->shost_data;
471 643
472 return queue_work(ihost->scan_workq, &session->unbind_work); 644 return queue_work(ihost->scan_workq, &session->unbind_work);
473} 645}
474 646
475struct iscsi_cls_session * 647struct iscsi_cls_session *
476iscsi_alloc_session(struct Scsi_Host *shost, 648iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
477 struct iscsi_transport *transport) 649 int dd_size)
478{ 650{
479 struct iscsi_cls_session *session; 651 struct iscsi_cls_session *session;
480 652
481 session = kzalloc(sizeof(*session) + transport->sessiondata_size, 653 session = kzalloc(sizeof(*session) + dd_size,
482 GFP_KERNEL); 654 GFP_KERNEL);
483 if (!session) 655 if (!session)
484 return NULL; 656 return NULL;
@@ -487,7 +659,6 @@ iscsi_alloc_session(struct Scsi_Host *shost,
487 session->recovery_tmo = 120; 659 session->recovery_tmo = 120;
488 session->state = ISCSI_SESSION_FREE; 660 session->state = ISCSI_SESSION_FREE;
489 INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); 661 INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
490 INIT_LIST_HEAD(&session->host_list);
491 INIT_LIST_HEAD(&session->sess_list); 662 INIT_LIST_HEAD(&session->sess_list);
492 INIT_WORK(&session->unblock_work, __iscsi_unblock_session); 663 INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
493 INIT_WORK(&session->block_work, __iscsi_block_session); 664 INIT_WORK(&session->block_work, __iscsi_block_session);
@@ -500,22 +671,57 @@ iscsi_alloc_session(struct Scsi_Host *shost,
500 session->dev.parent = &shost->shost_gendev; 671 session->dev.parent = &shost->shost_gendev;
501 session->dev.release = iscsi_session_release; 672 session->dev.release = iscsi_session_release;
502 device_initialize(&session->dev); 673 device_initialize(&session->dev);
503 if (transport->sessiondata_size) 674 if (dd_size)
504 session->dd_data = &session[1]; 675 session->dd_data = &session[1];
505 return session; 676 return session;
506} 677}
507EXPORT_SYMBOL_GPL(iscsi_alloc_session); 678EXPORT_SYMBOL_GPL(iscsi_alloc_session);
508 679
680static int iscsi_get_next_target_id(struct device *dev, void *data)
681{
682 struct iscsi_cls_session *session;
683 unsigned long flags;
684 int err = 0;
685
686 if (!iscsi_is_session_dev(dev))
687 return 0;
688
689 session = iscsi_dev_to_session(dev);
690 spin_lock_irqsave(&session->lock, flags);
691 if (*((unsigned int *) data) == session->target_id)
692 err = -EEXIST;
693 spin_unlock_irqrestore(&session->lock, flags);
694 return err;
695}
696
509int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) 697int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
510{ 698{
511 struct Scsi_Host *shost = iscsi_session_to_shost(session); 699 struct Scsi_Host *shost = iscsi_session_to_shost(session);
512 struct iscsi_host *ihost; 700 struct iscsi_cls_host *ihost;
513 unsigned long flags; 701 unsigned long flags;
702 unsigned int id = target_id;
514 int err; 703 int err;
515 704
516 ihost = shost->shost_data; 705 ihost = shost->shost_data;
517 session->sid = atomic_add_return(1, &iscsi_session_nr); 706 session->sid = atomic_add_return(1, &iscsi_session_nr);
518 session->target_id = target_id; 707
708 if (id == ISCSI_MAX_TARGET) {
709 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
710 err = device_for_each_child(&shost->shost_gendev, &id,
711 iscsi_get_next_target_id);
712 if (!err)
713 break;
714 }
715
716 if (id == ISCSI_MAX_TARGET) {
717 iscsi_cls_session_printk(KERN_ERR, session,
718 "Too many iscsi targets. Max "
719 "number of targets is %d.\n",
720 ISCSI_MAX_TARGET - 1);
721 goto release_host;
722 }
723 }
724 session->target_id = id;
519 725
520 snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u", 726 snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
521 session->sid); 727 session->sid);
@@ -531,10 +737,6 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
531 list_add(&session->sess_list, &sesslist); 737 list_add(&session->sess_list, &sesslist);
532 spin_unlock_irqrestore(&sesslock, flags); 738 spin_unlock_irqrestore(&sesslock, flags);
533 739
534 mutex_lock(&ihost->mutex);
535 list_add(&session->host_list, &ihost->sessions);
536 mutex_unlock(&ihost->mutex);
537
538 iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION); 740 iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
539 return 0; 741 return 0;
540 742
@@ -548,18 +750,18 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
548 * iscsi_create_session - create iscsi class session 750 * iscsi_create_session - create iscsi class session
549 * @shost: scsi host 751 * @shost: scsi host
550 * @transport: iscsi transport 752 * @transport: iscsi transport
753 * @dd_size: private driver data size
551 * @target_id: which target 754 * @target_id: which target
552 * 755 *
553 * This can be called from a LLD or iscsi_transport. 756 * This can be called from a LLD or iscsi_transport.
554 */ 757 */
555struct iscsi_cls_session * 758struct iscsi_cls_session *
556iscsi_create_session(struct Scsi_Host *shost, 759iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
557 struct iscsi_transport *transport, 760 int dd_size, unsigned int target_id)
558 unsigned int target_id)
559{ 761{
560 struct iscsi_cls_session *session; 762 struct iscsi_cls_session *session;
561 763
562 session = iscsi_alloc_session(shost, transport); 764 session = iscsi_alloc_session(shost, transport, dd_size);
563 if (!session) 765 if (!session)
564 return NULL; 766 return NULL;
565 767
@@ -595,7 +797,7 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
595void iscsi_remove_session(struct iscsi_cls_session *session) 797void iscsi_remove_session(struct iscsi_cls_session *session)
596{ 798{
597 struct Scsi_Host *shost = iscsi_session_to_shost(session); 799 struct Scsi_Host *shost = iscsi_session_to_shost(session);
598 struct iscsi_host *ihost = shost->shost_data; 800 struct iscsi_cls_host *ihost = shost->shost_data;
599 unsigned long flags; 801 unsigned long flags;
600 int err; 802 int err;
601 803
@@ -661,6 +863,7 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
661/** 863/**
662 * iscsi_create_conn - create iscsi class connection 864 * iscsi_create_conn - create iscsi class connection
663 * @session: iscsi cls session 865 * @session: iscsi cls session
866 * @dd_size: private driver data size
664 * @cid: connection id 867 * @cid: connection id
665 * 868 *
666 * This can be called from a LLD or iscsi_transport. The connection 869 * This can be called from a LLD or iscsi_transport. The connection
@@ -673,18 +876,17 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
673 * non-zero. 876 * non-zero.
674 */ 877 */
675struct iscsi_cls_conn * 878struct iscsi_cls_conn *
676iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid) 879iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
677{ 880{
678 struct iscsi_transport *transport = session->transport; 881 struct iscsi_transport *transport = session->transport;
679 struct iscsi_cls_conn *conn; 882 struct iscsi_cls_conn *conn;
680 unsigned long flags; 883 unsigned long flags;
681 int err; 884 int err;
682 885
683 conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL); 886 conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
684 if (!conn) 887 if (!conn)
685 return NULL; 888 return NULL;
686 889 if (dd_size)
687 if (transport->conndata_size)
688 conn->dd_data = &conn[1]; 890 conn->dd_data = &conn[1];
689 891
690 INIT_LIST_HEAD(&conn->conn_list); 892 INIT_LIST_HEAD(&conn->conn_list);
@@ -1017,21 +1219,20 @@ int iscsi_session_event(struct iscsi_cls_session *session,
1017EXPORT_SYMBOL_GPL(iscsi_session_event); 1219EXPORT_SYMBOL_GPL(iscsi_session_event);
1018 1220
1019static int 1221static int
1020iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) 1222iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
1223 struct iscsi_uevent *ev, uint32_t initial_cmdsn,
1224 uint16_t cmds_max, uint16_t queue_depth)
1021{ 1225{
1022 struct iscsi_transport *transport = priv->iscsi_transport; 1226 struct iscsi_transport *transport = priv->iscsi_transport;
1023 struct iscsi_cls_session *session; 1227 struct iscsi_cls_session *session;
1024 uint32_t hostno; 1228 uint32_t host_no;
1025 1229
1026 session = transport->create_session(transport, &priv->t, 1230 session = transport->create_session(ep, cmds_max, queue_depth,
1027 ev->u.c_session.cmds_max, 1231 initial_cmdsn, &host_no);
1028 ev->u.c_session.queue_depth,
1029 ev->u.c_session.initial_cmdsn,
1030 &hostno);
1031 if (!session) 1232 if (!session)
1032 return -ENOMEM; 1233 return -ENOMEM;
1033 1234
1034 ev->r.c_session_ret.host_no = hostno; 1235 ev->r.c_session_ret.host_no = host_no;
1035 ev->r.c_session_ret.sid = session->sid; 1236 ev->r.c_session_ret.sid = session->sid;
1036 return 0; 1237 return 0;
1037} 1238}
@@ -1106,6 +1307,7 @@ static int
1106iscsi_if_transport_ep(struct iscsi_transport *transport, 1307iscsi_if_transport_ep(struct iscsi_transport *transport,
1107 struct iscsi_uevent *ev, int msg_type) 1308 struct iscsi_uevent *ev, int msg_type)
1108{ 1309{
1310 struct iscsi_endpoint *ep;
1109 struct sockaddr *dst_addr; 1311 struct sockaddr *dst_addr;
1110 int rc = 0; 1312 int rc = 0;
1111 1313
@@ -1115,22 +1317,33 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
1115 return -EINVAL; 1317 return -EINVAL;
1116 1318
1117 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); 1319 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
1118 rc = transport->ep_connect(dst_addr, 1320 ep = transport->ep_connect(dst_addr,
1119 ev->u.ep_connect.non_blocking, 1321 ev->u.ep_connect.non_blocking);
1120 &ev->r.ep_connect_ret.handle); 1322 if (IS_ERR(ep))
1323 return PTR_ERR(ep);
1324
1325 ev->r.ep_connect_ret.handle = ep->id;
1121 break; 1326 break;
1122 case ISCSI_UEVENT_TRANSPORT_EP_POLL: 1327 case ISCSI_UEVENT_TRANSPORT_EP_POLL:
1123 if (!transport->ep_poll) 1328 if (!transport->ep_poll)
1124 return -EINVAL; 1329 return -EINVAL;
1125 1330
1126 ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle, 1331 ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
1332 if (!ep)
1333 return -EINVAL;
1334
1335 ev->r.retcode = transport->ep_poll(ep,
1127 ev->u.ep_poll.timeout_ms); 1336 ev->u.ep_poll.timeout_ms);
1128 break; 1337 break;
1129 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: 1338 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
1130 if (!transport->ep_disconnect) 1339 if (!transport->ep_disconnect)
1131 return -EINVAL; 1340 return -EINVAL;
1132 1341
1133 transport->ep_disconnect(ev->u.ep_disconnect.ep_handle); 1342 ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
1343 if (!ep)
1344 return -EINVAL;
1345
1346 transport->ep_disconnect(ep);
1134 break; 1347 break;
1135 } 1348 }
1136 return rc; 1349 return rc;
@@ -1195,6 +1408,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1195 struct iscsi_internal *priv; 1408 struct iscsi_internal *priv;
1196 struct iscsi_cls_session *session; 1409 struct iscsi_cls_session *session;
1197 struct iscsi_cls_conn *conn; 1410 struct iscsi_cls_conn *conn;
1411 struct iscsi_endpoint *ep = NULL;
1198 1412
1199 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); 1413 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
1200 if (!priv) 1414 if (!priv)
@@ -1208,7 +1422,22 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1208 1422
1209 switch (nlh->nlmsg_type) { 1423 switch (nlh->nlmsg_type) {
1210 case ISCSI_UEVENT_CREATE_SESSION: 1424 case ISCSI_UEVENT_CREATE_SESSION:
1211 err = iscsi_if_create_session(priv, ev); 1425 err = iscsi_if_create_session(priv, ep, ev,
1426 ev->u.c_session.initial_cmdsn,
1427 ev->u.c_session.cmds_max,
1428 ev->u.c_session.queue_depth);
1429 break;
1430 case ISCSI_UEVENT_CREATE_BOUND_SESSION:
1431 ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
1432 if (!ep) {
1433 err = -EINVAL;
1434 break;
1435 }
1436
1437 err = iscsi_if_create_session(priv, ep, ev,
1438 ev->u.c_bound_session.initial_cmdsn,
1439 ev->u.c_bound_session.cmds_max,
1440 ev->u.c_bound_session.queue_depth);
1212 break; 1441 break;
1213 case ISCSI_UEVENT_DESTROY_SESSION: 1442 case ISCSI_UEVENT_DESTROY_SESSION:
1214 session = iscsi_session_lookup(ev->u.d_session.sid); 1443 session = iscsi_session_lookup(ev->u.d_session.sid);
@@ -1414,6 +1643,8 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
1414iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0); 1643iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
1415iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0); 1644iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
1416iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); 1645iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
1646iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
1647iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
1417 1648
1418static ssize_t 1649static ssize_t
1419show_priv_session_state(struct device *dev, struct device_attribute *attr, 1650show_priv_session_state(struct device *dev, struct device_attribute *attr,
@@ -1580,6 +1811,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
1580 priv->daemon_pid = -1; 1811 priv->daemon_pid = -1;
1581 priv->iscsi_transport = tt; 1812 priv->iscsi_transport = tt;
1582 priv->t.user_scan = iscsi_user_scan; 1813 priv->t.user_scan = iscsi_user_scan;
1814 if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
1815 priv->t.create_work_queue = 1;
1583 1816
1584 priv->dev.class = &iscsi_transport_class; 1817 priv->dev.class = &iscsi_transport_class;
1585 snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name); 1818 snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
@@ -1595,7 +1828,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
1595 priv->t.host_attrs.ac.attrs = &priv->host_attrs[0]; 1828 priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
1596 priv->t.host_attrs.ac.class = &iscsi_host_class.class; 1829 priv->t.host_attrs.ac.class = &iscsi_host_class.class;
1597 priv->t.host_attrs.ac.match = iscsi_host_match; 1830 priv->t.host_attrs.ac.match = iscsi_host_match;
1598 priv->t.host_size = sizeof(struct iscsi_host); 1831 priv->t.host_size = sizeof(struct iscsi_cls_host);
1599 transport_container_register(&priv->t.host_attrs); 1832 transport_container_register(&priv->t.host_attrs);
1600 1833
1601 SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME); 1834 SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
@@ -1653,6 +1886,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
1653 SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT); 1886 SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
1654 SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO); 1887 SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
1655 SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO); 1888 SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
1889 SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
1890 SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
1656 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo); 1891 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
1657 SETUP_PRIV_SESSION_RD_ATTR(state); 1892 SETUP_PRIV_SESSION_RD_ATTR(state);
1658 1893
@@ -1668,6 +1903,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
1668 1903
1669unregister_dev: 1904unregister_dev:
1670 device_unregister(&priv->dev); 1905 device_unregister(&priv->dev);
1906 return NULL;
1671free_priv: 1907free_priv:
1672 kfree(priv); 1908 kfree(priv);
1673 return NULL; 1909 return NULL;
@@ -1715,10 +1951,14 @@ static __init int iscsi_transport_init(void)
1715 if (err) 1951 if (err)
1716 return err; 1952 return err;
1717 1953
1718 err = transport_class_register(&iscsi_host_class); 1954 err = class_register(&iscsi_endpoint_class);
1719 if (err) 1955 if (err)
1720 goto unregister_transport_class; 1956 goto unregister_transport_class;
1721 1957
1958 err = transport_class_register(&iscsi_host_class);
1959 if (err)
1960 goto unregister_endpoint_class;
1961
1722 err = transport_class_register(&iscsi_connection_class); 1962 err = transport_class_register(&iscsi_connection_class);
1723 if (err) 1963 if (err)
1724 goto unregister_host_class; 1964 goto unregister_host_class;
@@ -1727,8 +1967,8 @@ static __init int iscsi_transport_init(void)
1727 if (err) 1967 if (err)
1728 goto unregister_conn_class; 1968 goto unregister_conn_class;
1729 1969
1730 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL, 1970 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
1731 THIS_MODULE); 1971 NULL, THIS_MODULE);
1732 if (!nls) { 1972 if (!nls) {
1733 err = -ENOBUFS; 1973 err = -ENOBUFS;
1734 goto unregister_session_class; 1974 goto unregister_session_class;
@@ -1748,6 +1988,8 @@ unregister_conn_class:
1748 transport_class_unregister(&iscsi_connection_class); 1988 transport_class_unregister(&iscsi_connection_class);
1749unregister_host_class: 1989unregister_host_class:
1750 transport_class_unregister(&iscsi_host_class); 1990 transport_class_unregister(&iscsi_host_class);
1991unregister_endpoint_class:
1992 class_unregister(&iscsi_endpoint_class);
1751unregister_transport_class: 1993unregister_transport_class:
1752 class_unregister(&iscsi_transport_class); 1994 class_unregister(&iscsi_transport_class);
1753 return err; 1995 return err;
@@ -1760,6 +2002,7 @@ static void __exit iscsi_transport_exit(void)
1760 transport_class_unregister(&iscsi_connection_class); 2002 transport_class_unregister(&iscsi_connection_class);
1761 transport_class_unregister(&iscsi_session_class); 2003 transport_class_unregister(&iscsi_session_class);
1762 transport_class_unregister(&iscsi_host_class); 2004 transport_class_unregister(&iscsi_host_class);
2005 class_unregister(&iscsi_endpoint_class);
1763 class_unregister(&iscsi_transport_class); 2006 class_unregister(&iscsi_transport_class);
1764} 2007}
1765 2008
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d53312c42547..0c63947d8a9d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -58,8 +58,8 @@
58#include <scsi/scsi_host.h> 58#include <scsi/scsi_host.h>
59#include <scsi/scsi_ioctl.h> 59#include <scsi/scsi_ioctl.h>
60#include <scsi/scsicam.h> 60#include <scsi/scsicam.h>
61#include <scsi/sd.h>
62 61
62#include "sd.h"
63#include "scsi_logging.h" 63#include "scsi_logging.h"
64 64
65MODULE_AUTHOR("Eric Youngdale"); 65MODULE_AUTHOR("Eric Youngdale");
@@ -295,11 +295,6 @@ static int sd_major(int major_idx)
295 } 295 }
296} 296}
297 297
298static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
299{
300 return container_of(disk->private_data, struct scsi_disk, driver);
301}
302
303static struct scsi_disk *__scsi_disk_get(struct gendisk *disk) 298static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
304{ 299{
305 struct scsi_disk *sdkp = NULL; 300 struct scsi_disk *sdkp = NULL;
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
new file mode 100644
index 000000000000..03a3d45cfa42
--- /dev/null
+++ b/drivers/scsi/sd.h
@@ -0,0 +1,62 @@
1#ifndef _SCSI_DISK_H
2#define _SCSI_DISK_H
3
4/*
5 * More than enough for everybody ;) The huge number of majors
6 * is a leftover from 16bit dev_t days, we don't really need that
7 * much numberspace.
8 */
9#define SD_MAJORS 16
10
11/*
12 * This is limited by the naming scheme enforced in sd_probe,
13 * add another character to it if you really need more disks.
14 */
15#define SD_MAX_DISKS (((26 * 26) + 26 + 1) * 26)
16
17/*
18 * Time out in seconds for disks and Magneto-opticals (which are slower).
19 */
20#define SD_TIMEOUT (30 * HZ)
21#define SD_MOD_TIMEOUT (75 * HZ)
22
23/*
24 * Number of allowed retries
25 */
26#define SD_MAX_RETRIES 5
27#define SD_PASSTHROUGH_RETRIES 1
28
29/*
30 * Size of the initial data buffer for mode and read capacity data
31 */
32#define SD_BUF_SIZE 512
33
34struct scsi_disk {
35 struct scsi_driver *driver; /* always &sd_template */
36 struct scsi_device *device;
37 struct device dev;
38 struct gendisk *disk;
39 unsigned int openers; /* protected by BKL for now, yuck */
40 sector_t capacity; /* size in 512-byte sectors */
41 u32 index;
42 u8 media_present;
43 u8 write_prot;
44 unsigned previous_state : 1;
45 unsigned WCE : 1; /* state of disk WCE bit */
46 unsigned RCD : 1; /* state of disk RCD bit, unused */
47 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
48};
49#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
50
51static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
52{
53 return container_of(disk->private_data, struct scsi_disk, driver);
54}
55
56#define sd_printk(prefix, sdsk, fmt, a...) \
57 (sdsk)->disk ? \
58 sdev_printk(prefix, (sdsk)->device, "[%s] " fmt, \
59 (sdsk)->disk->disk_name, ##a) : \
60 sdev_printk(prefix, (sdsk)->device, fmt, ##a)
61
62#endif /* _SCSI_DISK_H */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index fccd2e88d600..d3b8ebb83776 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1036,6 +1036,9 @@ sg_ioctl(struct inode *inode, struct file *filp,
1036 case SG_SCSI_RESET_DEVICE: 1036 case SG_SCSI_RESET_DEVICE:
1037 val = SCSI_TRY_RESET_DEVICE; 1037 val = SCSI_TRY_RESET_DEVICE;
1038 break; 1038 break;
1039 case SG_SCSI_RESET_TARGET:
1040 val = SCSI_TRY_RESET_TARGET;
1041 break;
1039 case SG_SCSI_RESET_BUS: 1042 case SG_SCSI_RESET_BUS:
1040 val = SCSI_TRY_RESET_BUS; 1043 val = SCSI_TRY_RESET_BUS;
1041 break; 1044 break;
diff --git a/drivers/scsi/sym53c8xx_2/sym_misc.h b/drivers/scsi/sym53c8xx_2/sym_misc.h
index 0433d5d0caf3..430537183c18 100644
--- a/drivers/scsi/sym53c8xx_2/sym_misc.h
+++ b/drivers/scsi/sym53c8xx_2/sym_misc.h
@@ -121,9 +121,7 @@ static __inline void sym_que_move(struct sym_quehead *orig,
121 } 121 }
122} 122}
123 123
124#define sym_que_entry(ptr, type, member) \ 124#define sym_que_entry(ptr, type, member) container_of(ptr, type, member)
125 ((type *)((char *)(ptr)-(unsigned int)(&((type *)0)->member)))
126
127 125
128#define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink) 126#define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink)
129 127
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index 97c68d021d28..638b68649e79 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -383,21 +383,14 @@ static int __devinit check_name(char *name)
383 return 0; 383 return 0;
384} 384}
385 385
386static int __devinit check_resources(struct pnp_option *option) 386static int __devinit check_resources(struct pnp_dev *dev)
387{ 387{
388 struct pnp_option *tmp; 388 resource_size_t base[] = {0x2f8, 0x3f8, 0x2e8, 0x3e8};
389 if (!option) 389 int i;
390 return 0;
391 390
392 for (tmp = option; tmp; tmp = tmp->next) { 391 for (i = 0; i < ARRAY_SIZE(base); i++) {
393 struct pnp_port *port; 392 if (pnp_possible_config(dev, IORESOURCE_IO, base[i], 8))
394 for (port = tmp->port; port; port = port->next) 393 return 1;
395 if ((port->size == 8) &&
396 ((port->min == 0x2f8) ||
397 (port->min == 0x3f8) ||
398 (port->min == 0x2e8) ||
399 (port->min == 0x3e8)))
400 return 1;
401 } 394 }
402 395
403 return 0; 396 return 0;
@@ -420,10 +413,7 @@ static int __devinit serial_pnp_guess_board(struct pnp_dev *dev, int *flags)
420 (dev->card && check_name(dev->card->name)))) 413 (dev->card && check_name(dev->card->name))))
421 return -ENODEV; 414 return -ENODEV;
422 415
423 if (check_resources(dev->independent)) 416 if (check_resources(dev))
424 return 0;
425
426 if (check_resources(dev->dependent))
427 return 0; 417 return 0;
428 418
429 return -ENODEV; 419 return -ENODEV;
diff --git a/drivers/serial/cpm_uart/cpm_uart.h b/drivers/serial/cpm_uart/cpm_uart.h
index 0cc39f82d7c5..5c76e0ae0582 100644
--- a/drivers/serial/cpm_uart/cpm_uart.h
+++ b/drivers/serial/cpm_uart/cpm_uart.h
@@ -6,7 +6,7 @@
6 * Copyright (C) 2004 Freescale Semiconductor, Inc. 6 * Copyright (C) 2004 Freescale Semiconductor, Inc.
7 * 7 *
8 * 2006 (c) MontaVista Software, Inc. 8 * 2006 (c) MontaVista Software, Inc.
9 * Vitaly Bordug <vbordug@ru.mvista.com> 9 * Vitaly Bordug <vbordug@ru.mvista.com>
10 * 10 *
11 * This file is licensed under the terms of the GNU General Public License 11 * This file is licensed under the terms of the GNU General Public License
12 * version 2. This program is licensed "as is" without any warranty of any 12 * version 2. This program is licensed "as is" without any warranty of any
@@ -28,7 +28,7 @@
28#define SERIAL_CPM_MAJOR 204 28#define SERIAL_CPM_MAJOR 204
29#define SERIAL_CPM_MINOR 46 29#define SERIAL_CPM_MINOR 46
30 30
31#define IS_SMC(pinfo) (pinfo->flags & FLAG_SMC) 31#define IS_SMC(pinfo) (pinfo->flags & FLAG_SMC)
32#define IS_DISCARDING(pinfo) (pinfo->flags & FLAG_DISCARDING) 32#define IS_DISCARDING(pinfo) (pinfo->flags & FLAG_DISCARDING)
33#define FLAG_DISCARDING 0x00000004 /* when set, don't discard */ 33#define FLAG_DISCARDING 0x00000004 /* when set, don't discard */
34#define FLAG_SMC 0x00000002 34#define FLAG_SMC 0x00000002
@@ -70,7 +70,7 @@ struct uart_cpm_port {
70 void (*set_lineif)(struct uart_cpm_port *); 70 void (*set_lineif)(struct uart_cpm_port *);
71 u8 brg; 71 u8 brg;
72 uint dp_addr; 72 uint dp_addr;
73 void *mem_addr; 73 void *mem_addr;
74 dma_addr_t dma_addr; 74 dma_addr_t dma_addr;
75 u32 mem_size; 75 u32 mem_size;
76 /* helpers */ 76 /* helpers */
@@ -79,14 +79,11 @@ struct uart_cpm_port {
79 /* Keep track of 'odd' SMC2 wirings */ 79 /* Keep track of 'odd' SMC2 wirings */
80 int is_portb; 80 int is_portb;
81 /* wait on close if needed */ 81 /* wait on close if needed */
82 int wait_closing; 82 int wait_closing;
83 /* value to combine with opcode to form cpm command */ 83 /* value to combine with opcode to form cpm command */
84 u32 command; 84 u32 command;
85}; 85};
86 86
87#ifndef CONFIG_PPC_CPM_NEW_BINDING
88extern int cpm_uart_port_map[UART_NR];
89#endif
90extern int cpm_uart_nr; 87extern int cpm_uart_nr;
91extern struct uart_cpm_port cpm_uart_ports[UART_NR]; 88extern struct uart_cpm_port cpm_uart_ports[UART_NR];
92 89
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index a19dc7ef8861..abe129cc927a 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -13,7 +13,7 @@
13 * Copyright (C) 2004, 2007 Freescale Semiconductor, Inc. 13 * Copyright (C) 2004, 2007 Freescale Semiconductor, Inc.
14 * (C) 2004 Intracom, S.A. 14 * (C) 2004 Intracom, S.A.
15 * (C) 2005-2006 MontaVista Software, Inc. 15 * (C) 2005-2006 MontaVista Software, Inc.
16 * Vitaly Bordug <vbordug@ru.mvista.com> 16 * Vitaly Bordug <vbordug@ru.mvista.com>
17 * 17 *
18 * This program is free software; you can redistribute it and/or modify 18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by 19 * it under the terms of the GNU General Public License as published by
@@ -42,6 +42,7 @@
42#include <linux/bootmem.h> 42#include <linux/bootmem.h>
43#include <linux/dma-mapping.h> 43#include <linux/dma-mapping.h>
44#include <linux/fs_uart_pd.h> 44#include <linux/fs_uart_pd.h>
45#include <linux/of_platform.h>
45 46
46#include <asm/io.h> 47#include <asm/io.h>
47#include <asm/irq.h> 48#include <asm/irq.h>
@@ -49,10 +50,6 @@
49#include <asm/fs_pd.h> 50#include <asm/fs_pd.h>
50#include <asm/udbg.h> 51#include <asm/udbg.h>
51 52
52#ifdef CONFIG_PPC_CPM_NEW_BINDING
53#include <linux/of_platform.h>
54#endif
55
56#if defined(CONFIG_SERIAL_CPM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 53#if defined(CONFIG_SERIAL_CPM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
57#define SUPPORT_SYSRQ 54#define SUPPORT_SYSRQ
58#endif 55#endif
@@ -72,59 +69,6 @@ static void cpm_uart_initbd(struct uart_cpm_port *pinfo);
72 69
73/**************************************************************/ 70/**************************************************************/
74 71
75#ifndef CONFIG_PPC_CPM_NEW_BINDING
76/* Track which ports are configured as uarts */
77int cpm_uart_port_map[UART_NR];
78/* How many ports did we config as uarts */
79int cpm_uart_nr;
80
81/* Place-holder for board-specific stuff */
82struct platform_device* __attribute__ ((weak)) __init
83early_uart_get_pdev(int index)
84{
85 return NULL;
86}
87
88
89static void cpm_uart_count(void)
90{
91 cpm_uart_nr = 0;
92#ifdef CONFIG_SERIAL_CPM_SMC1
93 cpm_uart_port_map[cpm_uart_nr++] = UART_SMC1;
94#endif
95#ifdef CONFIG_SERIAL_CPM_SMC2
96 cpm_uart_port_map[cpm_uart_nr++] = UART_SMC2;
97#endif
98#ifdef CONFIG_SERIAL_CPM_SCC1
99 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC1;
100#endif
101#ifdef CONFIG_SERIAL_CPM_SCC2
102 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC2;
103#endif
104#ifdef CONFIG_SERIAL_CPM_SCC3
105 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC3;
106#endif
107#ifdef CONFIG_SERIAL_CPM_SCC4
108 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC4;
109#endif
110}
111
112/* Get UART number by its id */
113static int cpm_uart_id2nr(int id)
114{
115 int i;
116 if (id < UART_NR) {
117 for (i=0; i<UART_NR; i++) {
118 if (cpm_uart_port_map[i] == id)
119 return i;
120 }
121 }
122
123 /* not found or invalid argument */
124 return -1;
125}
126#endif
127
128/* 72/*
129 * Check, if transmit buffers are processed 73 * Check, if transmit buffers are processed
130*/ 74*/
@@ -547,6 +491,11 @@ static void cpm_uart_set_termios(struct uart_port *port,
547 } 491 }
548 492
549 /* 493 /*
494 * Update the timeout
495 */
496 uart_update_timeout(port, termios->c_cflag, baud);
497
498 /*
550 * Set up parity check flag 499 * Set up parity check flag
551 */ 500 */
552#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) 501#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
@@ -935,7 +884,6 @@ static struct uart_ops cpm_uart_pops = {
935 .verify_port = cpm_uart_verify_port, 884 .verify_port = cpm_uart_verify_port,
936}; 885};
937 886
938#ifdef CONFIG_PPC_CPM_NEW_BINDING
939struct uart_cpm_port cpm_uart_ports[UART_NR]; 887struct uart_cpm_port cpm_uart_ports[UART_NR];
940 888
941static int cpm_uart_init_port(struct device_node *np, 889static int cpm_uart_init_port(struct device_node *np,
@@ -995,6 +943,7 @@ static int cpm_uart_init_port(struct device_node *np,
995 pinfo->port.type = PORT_CPM; 943 pinfo->port.type = PORT_CPM;
996 pinfo->port.ops = &cpm_uart_pops, 944 pinfo->port.ops = &cpm_uart_pops,
997 pinfo->port.iotype = UPIO_MEM; 945 pinfo->port.iotype = UPIO_MEM;
946 pinfo->port.fifosize = pinfo->tx_nrfifos * pinfo->tx_fifosize;
998 spin_lock_init(&pinfo->port.lock); 947 spin_lock_init(&pinfo->port.lock);
999 948
1000 pinfo->port.irq = of_irq_to_resource(np, 0, NULL); 949 pinfo->port.irq = of_irq_to_resource(np, 0, NULL);
@@ -1012,153 +961,6 @@ out_mem:
1012 return ret; 961 return ret;
1013} 962}
1014 963
1015#else
1016
1017struct uart_cpm_port cpm_uart_ports[UART_NR] = {
1018 [UART_SMC1] = {
1019 .port = {
1020 .irq = SMC1_IRQ,
1021 .ops = &cpm_uart_pops,
1022 .iotype = UPIO_MEM,
1023 .lock = __SPIN_LOCK_UNLOCKED(cpm_uart_ports[UART_SMC1].port.lock),
1024 },
1025 .flags = FLAG_SMC,
1026 .tx_nrfifos = TX_NUM_FIFO,
1027 .tx_fifosize = TX_BUF_SIZE,
1028 .rx_nrfifos = RX_NUM_FIFO,
1029 .rx_fifosize = RX_BUF_SIZE,
1030 .set_lineif = smc1_lineif,
1031 },
1032 [UART_SMC2] = {
1033 .port = {
1034 .irq = SMC2_IRQ,
1035 .ops = &cpm_uart_pops,
1036 .iotype = UPIO_MEM,
1037 .lock = __SPIN_LOCK_UNLOCKED(cpm_uart_ports[UART_SMC2].port.lock),
1038 },
1039 .flags = FLAG_SMC,
1040 .tx_nrfifos = TX_NUM_FIFO,
1041 .tx_fifosize = TX_BUF_SIZE,
1042 .rx_nrfifos = RX_NUM_FIFO,
1043 .rx_fifosize = RX_BUF_SIZE,
1044 .set_lineif = smc2_lineif,
1045#ifdef CONFIG_SERIAL_CPM_ALT_SMC2
1046 .is_portb = 1,
1047#endif
1048 },
1049 [UART_SCC1] = {
1050 .port = {
1051 .irq = SCC1_IRQ,
1052 .ops = &cpm_uart_pops,
1053 .iotype = UPIO_MEM,
1054 .lock = __SPIN_LOCK_UNLOCKED(cpm_uart_ports[UART_SCC1].port.lock),
1055 },
1056 .tx_nrfifos = TX_NUM_FIFO,
1057 .tx_fifosize = TX_BUF_SIZE,
1058 .rx_nrfifos = RX_NUM_FIFO,
1059 .rx_fifosize = RX_BUF_SIZE,
1060 .set_lineif = scc1_lineif,
1061 .wait_closing = SCC_WAIT_CLOSING,
1062 },
1063 [UART_SCC2] = {
1064 .port = {
1065 .irq = SCC2_IRQ,
1066 .ops = &cpm_uart_pops,
1067 .iotype = UPIO_MEM,
1068 .lock = __SPIN_LOCK_UNLOCKED(cpm_uart_ports[UART_SCC2].port.lock),
1069 },
1070 .tx_nrfifos = TX_NUM_FIFO,
1071 .tx_fifosize = TX_BUF_SIZE,
1072 .rx_nrfifos = RX_NUM_FIFO,
1073 .rx_fifosize = RX_BUF_SIZE,
1074 .set_lineif = scc2_lineif,
1075 .wait_closing = SCC_WAIT_CLOSING,
1076 },
1077 [UART_SCC3] = {
1078 .port = {
1079 .irq = SCC3_IRQ,
1080 .ops = &cpm_uart_pops,
1081 .iotype = UPIO_MEM,
1082 .lock = __SPIN_LOCK_UNLOCKED(cpm_uart_ports[UART_SCC3].port.lock),
1083 },
1084 .tx_nrfifos = TX_NUM_FIFO,
1085 .tx_fifosize = TX_BUF_SIZE,
1086 .rx_nrfifos = RX_NUM_FIFO,
1087 .rx_fifosize = RX_BUF_SIZE,
1088 .set_lineif = scc3_lineif,
1089 .wait_closing = SCC_WAIT_CLOSING,
1090 },
1091 [UART_SCC4] = {
1092 .port = {
1093 .irq = SCC4_IRQ,
1094 .ops = &cpm_uart_pops,
1095 .iotype = UPIO_MEM,
1096 .lock = __SPIN_LOCK_UNLOCKED(cpm_uart_ports[UART_SCC4].port.lock),
1097 },
1098 .tx_nrfifos = TX_NUM_FIFO,
1099 .tx_fifosize = TX_BUF_SIZE,
1100 .rx_nrfifos = RX_NUM_FIFO,
1101 .rx_fifosize = RX_BUF_SIZE,
1102 .set_lineif = scc4_lineif,
1103 .wait_closing = SCC_WAIT_CLOSING,
1104 },
1105};
1106
1107int cpm_uart_drv_get_platform_data(struct platform_device *pdev, int is_con)
1108{
1109 struct resource *r;
1110 struct fs_uart_platform_info *pdata = pdev->dev.platform_data;
1111 int idx; /* It is UART_SMCx or UART_SCCx index */
1112 struct uart_cpm_port *pinfo;
1113 int line;
1114 u32 mem, pram;
1115
1116 idx = pdata->fs_no = fs_uart_get_id(pdata);
1117
1118 line = cpm_uart_id2nr(idx);
1119 if(line < 0) {
1120 printk(KERN_ERR"%s(): port %d is not registered", __func__, idx);
1121 return -EINVAL;
1122 }
1123
1124 pinfo = (struct uart_cpm_port *) &cpm_uart_ports[idx];
1125
1126 pinfo->brg = pdata->brg;
1127
1128 if (!is_con) {
1129 pinfo->port.line = line;
1130 pinfo->port.flags = UPF_BOOT_AUTOCONF;
1131 }
1132
1133 if (!(r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs")))
1134 return -EINVAL;
1135 mem = (u32)ioremap(r->start, r->end - r->start + 1);
1136
1137 if (!(r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pram")))
1138 return -EINVAL;
1139 pram = (u32)ioremap(r->start, r->end - r->start + 1);
1140
1141 if(idx > fsid_smc2_uart) {
1142 pinfo->sccp = (scc_t *)mem;
1143 pinfo->sccup = (scc_uart_t *)pram;
1144 } else {
1145 pinfo->smcp = (smc_t *)mem;
1146 pinfo->smcup = (smc_uart_t *)pram;
1147 }
1148 pinfo->tx_nrfifos = pdata->tx_num_fifo;
1149 pinfo->tx_fifosize = pdata->tx_buf_size;
1150
1151 pinfo->rx_nrfifos = pdata->rx_num_fifo;
1152 pinfo->rx_fifosize = pdata->rx_buf_size;
1153
1154 pinfo->port.uartclk = pdata->uart_clk;
1155 pinfo->port.mapbase = (unsigned long)mem;
1156 pinfo->port.irq = platform_get_irq(pdev, 0);
1157
1158 return 0;
1159}
1160#endif
1161
1162#ifdef CONFIG_SERIAL_CPM_CONSOLE 964#ifdef CONFIG_SERIAL_CPM_CONSOLE
1163/* 965/*
1164 * Print a string to the serial port trying not to disturb 966 * Print a string to the serial port trying not to disturb
@@ -1169,15 +971,18 @@ int cpm_uart_drv_get_platform_data(struct platform_device *pdev, int is_con)
1169static void cpm_uart_console_write(struct console *co, const char *s, 971static void cpm_uart_console_write(struct console *co, const char *s,
1170 u_int count) 972 u_int count)
1171{ 973{
1172#ifdef CONFIG_PPC_CPM_NEW_BINDING
1173 struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index]; 974 struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
1174#else
1175 struct uart_cpm_port *pinfo =
1176 &cpm_uart_ports[cpm_uart_port_map[co->index]];
1177#endif
1178 unsigned int i; 975 unsigned int i;
1179 cbd_t __iomem *bdp, *bdbase; 976 cbd_t __iomem *bdp, *bdbase;
1180 unsigned char *cp; 977 unsigned char *cp;
978 unsigned long flags;
979 int nolock = oops_in_progress;
980
981 if (unlikely(nolock)) {
982 local_irq_save(flags);
983 } else {
984 spin_lock_irqsave(&pinfo->port.lock, flags);
985 }
1181 986
1182 /* Get the address of the host memory buffer. 987 /* Get the address of the host memory buffer.
1183 */ 988 */
@@ -1239,6 +1044,12 @@ static void cpm_uart_console_write(struct console *co, const char *s,
1239 ; 1044 ;
1240 1045
1241 pinfo->tx_cur = bdp; 1046 pinfo->tx_cur = bdp;
1047
1048 if (unlikely(nolock)) {
1049 local_irq_restore(flags);
1050 } else {
1051 spin_unlock_irqrestore(&pinfo->port.lock, flags);
1052 }
1242} 1053}
1243 1054
1244 1055
@@ -1252,7 +1063,6 @@ static int __init cpm_uart_console_setup(struct console *co, char *options)
1252 struct uart_cpm_port *pinfo; 1063 struct uart_cpm_port *pinfo;
1253 struct uart_port *port; 1064 struct uart_port *port;
1254 1065
1255#ifdef CONFIG_PPC_CPM_NEW_BINDING
1256 struct device_node *np = NULL; 1066 struct device_node *np = NULL;
1257 int i = 0; 1067 int i = 0;
1258 1068
@@ -1284,35 +1094,6 @@ static int __init cpm_uart_console_setup(struct console *co, char *options)
1284 if (ret) 1094 if (ret)
1285 return ret; 1095 return ret;
1286 1096
1287#else
1288
1289 struct fs_uart_platform_info *pdata;
1290 struct platform_device* pdev = early_uart_get_pdev(co->index);
1291
1292 if (!pdev) {
1293 pr_info("cpm_uart: console: compat mode\n");
1294 /* compatibility - will be cleaned up */
1295 cpm_uart_init_portdesc();
1296 }
1297
1298 port =
1299 (struct uart_port *)&cpm_uart_ports[cpm_uart_port_map[co->index]];
1300 pinfo = (struct uart_cpm_port *)port;
1301 if (!pdev) {
1302 if (pinfo->set_lineif)
1303 pinfo->set_lineif(pinfo);
1304 } else {
1305 pdata = pdev->dev.platform_data;
1306 if (pdata)
1307 if (pdata->init_ioports)
1308 pdata->init_ioports(pdata);
1309
1310 cpm_uart_drv_get_platform_data(pdev, 1);
1311 }
1312
1313 pinfo->flags |= FLAG_CONSOLE;
1314#endif
1315
1316 if (options) { 1097 if (options) {
1317 uart_parse_options(options, &baud, &parity, &bits, &flow); 1098 uart_parse_options(options, &baud, &parity, &bits, &flow);
1318 } else { 1099 } else {
@@ -1386,7 +1167,6 @@ static struct uart_driver cpm_reg = {
1386 .nr = UART_NR, 1167 .nr = UART_NR,
1387}; 1168};
1388 1169
1389#ifdef CONFIG_PPC_CPM_NEW_BINDING
1390static int probe_index; 1170static int probe_index;
1391 1171
1392static int __devinit cpm_uart_probe(struct of_device *ofdev, 1172static int __devinit cpm_uart_probe(struct of_device *ofdev,
@@ -1457,135 +1237,6 @@ static void __exit cpm_uart_exit(void)
1457 of_unregister_platform_driver(&cpm_uart_driver); 1237 of_unregister_platform_driver(&cpm_uart_driver);
1458 uart_unregister_driver(&cpm_reg); 1238 uart_unregister_driver(&cpm_reg);
1459} 1239}
1460#else
1461static int cpm_uart_drv_probe(struct device *dev)
1462{
1463 struct platform_device *pdev = to_platform_device(dev);
1464 struct fs_uart_platform_info *pdata;
1465 int ret = -ENODEV;
1466
1467 if(!pdev) {
1468 printk(KERN_ERR"CPM UART: platform data missing!\n");
1469 return ret;
1470 }
1471
1472 pdata = pdev->dev.platform_data;
1473
1474 if ((ret = cpm_uart_drv_get_platform_data(pdev, 0)))
1475 return ret;
1476
1477 pr_debug("cpm_uart_drv_probe: Adding CPM UART %d\n", cpm_uart_id2nr(pdata->fs_no));
1478
1479 if (pdata->init_ioports)
1480 pdata->init_ioports(pdata);
1481
1482 ret = uart_add_one_port(&cpm_reg, &cpm_uart_ports[pdata->fs_no].port);
1483
1484 return ret;
1485}
1486
1487static int cpm_uart_drv_remove(struct device *dev)
1488{
1489 struct platform_device *pdev = to_platform_device(dev);
1490 struct fs_uart_platform_info *pdata = pdev->dev.platform_data;
1491
1492 pr_debug("cpm_uart_drv_remove: Removing CPM UART %d\n",
1493 cpm_uart_id2nr(pdata->fs_no));
1494
1495 uart_remove_one_port(&cpm_reg, &cpm_uart_ports[pdata->fs_no].port);
1496 return 0;
1497}
1498
1499static struct device_driver cpm_smc_uart_driver = {
1500 .name = "fsl-cpm-smc:uart",
1501 .bus = &platform_bus_type,
1502 .probe = cpm_uart_drv_probe,
1503 .remove = cpm_uart_drv_remove,
1504};
1505
1506static struct device_driver cpm_scc_uart_driver = {
1507 .name = "fsl-cpm-scc:uart",
1508 .bus = &platform_bus_type,
1509 .probe = cpm_uart_drv_probe,
1510 .remove = cpm_uart_drv_remove,
1511};
1512
1513/*
1514 This is supposed to match uart devices on platform bus,
1515 */
1516static int match_is_uart (struct device* dev, void* data)
1517{
1518 struct platform_device* pdev = container_of(dev, struct platform_device, dev);
1519 int ret = 0;
1520 /* this was setfunc as uart */
1521 if(strstr(pdev->name,":uart")) {
1522 ret = 1;
1523 }
1524 return ret;
1525}
1526
1527
1528static int cpm_uart_init(void) {
1529
1530 int ret;
1531 int i;
1532 struct device *dev;
1533 printk(KERN_INFO "Serial: CPM driver $Revision: 0.02 $\n");
1534
1535 /* lookup the bus for uart devices */
1536 dev = bus_find_device(&platform_bus_type, NULL, 0, match_is_uart);
1537
1538 /* There are devices on the bus - all should be OK */
1539 if (dev) {
1540 cpm_uart_count();
1541 cpm_reg.nr = cpm_uart_nr;
1542
1543 if (!(ret = uart_register_driver(&cpm_reg))) {
1544 if ((ret = driver_register(&cpm_smc_uart_driver))) {
1545 uart_unregister_driver(&cpm_reg);
1546 return ret;
1547 }
1548 if ((ret = driver_register(&cpm_scc_uart_driver))) {
1549 driver_unregister(&cpm_scc_uart_driver);
1550 uart_unregister_driver(&cpm_reg);
1551 }
1552 }
1553 } else {
1554 /* No capable platform devices found - falling back to legacy mode */
1555 pr_info("cpm_uart: WARNING: no UART devices found on platform bus!\n");
1556 pr_info(
1557 "cpm_uart: the driver will guess configuration, but this mode is no longer supported.\n");
1558
1559 /* Don't run this again, if the console driver did it already */
1560 if (cpm_uart_nr == 0)
1561 cpm_uart_init_portdesc();
1562
1563 cpm_reg.nr = cpm_uart_nr;
1564 ret = uart_register_driver(&cpm_reg);
1565
1566 if (ret)
1567 return ret;
1568
1569 for (i = 0; i < cpm_uart_nr; i++) {
1570 int con = cpm_uart_port_map[i];
1571 cpm_uart_ports[con].port.line = i;
1572 cpm_uart_ports[con].port.flags = UPF_BOOT_AUTOCONF;
1573 if (cpm_uart_ports[con].set_lineif)
1574 cpm_uart_ports[con].set_lineif(&cpm_uart_ports[con]);
1575 uart_add_one_port(&cpm_reg, &cpm_uart_ports[con].port);
1576 }
1577
1578 }
1579 return ret;
1580}
1581
1582static void __exit cpm_uart_exit(void)
1583{
1584 driver_unregister(&cpm_scc_uart_driver);
1585 driver_unregister(&cpm_smc_uart_driver);
1586 uart_unregister_driver(&cpm_reg);
1587}
1588#endif
1589 1240
1590module_init(cpm_uart_init); 1241module_init(cpm_uart_init);
1591module_exit(cpm_uart_exit); 1242module_exit(cpm_uart_exit);
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/serial/cpm_uart/cpm_uart_cpm1.c
index 74f1432bb248..0f0aff06c596 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.c
@@ -9,7 +9,7 @@
9 * Copyright (C) 2004 Freescale Semiconductor, Inc. 9 * Copyright (C) 2004 Freescale Semiconductor, Inc.
10 * (C) 2004 Intracom, S.A. 10 * (C) 2004 Intracom, S.A.
11 * (C) 2006 MontaVista Software, Inc. 11 * (C) 2006 MontaVista Software, Inc.
12 * Vitaly Bordug <vbordug@ru.mvista.com> 12 * Vitaly Bordug <vbordug@ru.mvista.com>
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
@@ -51,7 +51,6 @@
51 51
52/**************************************************************/ 52/**************************************************************/
53 53
54#ifdef CONFIG_PPC_CPM_NEW_BINDING
55void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd) 54void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd)
56{ 55{
57 cpm_command(port->command, cmd); 56 cpm_command(port->command, cmd);
@@ -68,75 +67,6 @@ void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram)
68 iounmap(pram); 67 iounmap(pram);
69} 68}
70 69
71#else
72void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd)
73{
74 ushort val;
75 int line = port - cpm_uart_ports;
76 volatile cpm8xx_t *cp = cpmp;
77
78 switch (line) {
79 case UART_SMC1:
80 val = mk_cr_cmd(CPM_CR_CH_SMC1, cmd) | CPM_CR_FLG;
81 break;
82 case UART_SMC2:
83 val = mk_cr_cmd(CPM_CR_CH_SMC2, cmd) | CPM_CR_FLG;
84 break;
85 case UART_SCC1:
86 val = mk_cr_cmd(CPM_CR_CH_SCC1, cmd) | CPM_CR_FLG;
87 break;
88 case UART_SCC2:
89 val = mk_cr_cmd(CPM_CR_CH_SCC2, cmd) | CPM_CR_FLG;
90 break;
91 case UART_SCC3:
92 val = mk_cr_cmd(CPM_CR_CH_SCC3, cmd) | CPM_CR_FLG;
93 break;
94 case UART_SCC4:
95 val = mk_cr_cmd(CPM_CR_CH_SCC4, cmd) | CPM_CR_FLG;
96 break;
97 default:
98 return;
99
100 }
101 cp->cp_cpcr = val;
102 while (cp->cp_cpcr & CPM_CR_FLG) ;
103}
104
105void smc1_lineif(struct uart_cpm_port *pinfo)
106{
107 pinfo->brg = 1;
108}
109
110void smc2_lineif(struct uart_cpm_port *pinfo)
111{
112 pinfo->brg = 2;
113}
114
115void scc1_lineif(struct uart_cpm_port *pinfo)
116{
117 /* XXX SCC1: insert port configuration here */
118 pinfo->brg = 1;
119}
120
121void scc2_lineif(struct uart_cpm_port *pinfo)
122{
123 /* XXX SCC2: insert port configuration here */
124 pinfo->brg = 2;
125}
126
127void scc3_lineif(struct uart_cpm_port *pinfo)
128{
129 /* XXX SCC3: insert port configuration here */
130 pinfo->brg = 3;
131}
132
133void scc4_lineif(struct uart_cpm_port *pinfo)
134{
135 /* XXX SCC4: insert port configuration here */
136 pinfo->brg = 4;
137}
138#endif
139
140/* 70/*
141 * Allocate DP-Ram and memory buffers. We need to allocate a transmit and 71 * Allocate DP-Ram and memory buffers. We need to allocate a transmit and
142 * receive buffer descriptors from dual port ram, and a character 72 * receive buffer descriptors from dual port ram, and a character
@@ -205,101 +135,3 @@ void cpm_uart_freebuf(struct uart_cpm_port *pinfo)
205 135
206 cpm_dpfree(pinfo->dp_addr); 136 cpm_dpfree(pinfo->dp_addr);
207} 137}
208
209#ifndef CONFIG_PPC_CPM_NEW_BINDING
210/* Setup any dynamic params in the uart desc */
211int cpm_uart_init_portdesc(void)
212{
213 pr_debug("CPM uart[-]:init portdesc\n");
214
215 cpm_uart_nr = 0;
216#ifdef CONFIG_SERIAL_CPM_SMC1
217 cpm_uart_ports[UART_SMC1].smcp = &cpmp->cp_smc[0];
218/*
219 * Is SMC1 being relocated?
220 */
221# ifdef CONFIG_I2C_SPI_SMC1_UCODE_PATCH
222 cpm_uart_ports[UART_SMC1].smcup =
223 (smc_uart_t *) & cpmp->cp_dparam[0x3C0];
224# else
225 cpm_uart_ports[UART_SMC1].smcup =
226 (smc_uart_t *) & cpmp->cp_dparam[PROFF_SMC1];
227# endif
228 cpm_uart_ports[UART_SMC1].port.mapbase =
229 (unsigned long)&cpmp->cp_smc[0];
230 cpm_uart_ports[UART_SMC1].smcp->smc_smcm |= (SMCM_RX | SMCM_TX);
231 cpm_uart_ports[UART_SMC1].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
232 cpm_uart_ports[UART_SMC1].port.uartclk = uart_clock();
233 cpm_uart_port_map[cpm_uart_nr++] = UART_SMC1;
234#endif
235
236#ifdef CONFIG_SERIAL_CPM_SMC2
237 cpm_uart_ports[UART_SMC2].smcp = &cpmp->cp_smc[1];
238 cpm_uart_ports[UART_SMC2].smcup =
239 (smc_uart_t *) & cpmp->cp_dparam[PROFF_SMC2];
240 cpm_uart_ports[UART_SMC2].port.mapbase =
241 (unsigned long)&cpmp->cp_smc[1];
242 cpm_uart_ports[UART_SMC2].smcp->smc_smcm |= (SMCM_RX | SMCM_TX);
243 cpm_uart_ports[UART_SMC2].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
244 cpm_uart_ports[UART_SMC2].port.uartclk = uart_clock();
245 cpm_uart_port_map[cpm_uart_nr++] = UART_SMC2;
246#endif
247
248#ifdef CONFIG_SERIAL_CPM_SCC1
249 cpm_uart_ports[UART_SCC1].sccp = &cpmp->cp_scc[0];
250 cpm_uart_ports[UART_SCC1].sccup =
251 (scc_uart_t *) & cpmp->cp_dparam[PROFF_SCC1];
252 cpm_uart_ports[UART_SCC1].port.mapbase =
253 (unsigned long)&cpmp->cp_scc[0];
254 cpm_uart_ports[UART_SCC1].sccp->scc_sccm &=
255 ~(UART_SCCM_TX | UART_SCCM_RX);
256 cpm_uart_ports[UART_SCC1].sccp->scc_gsmrl &=
257 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
258 cpm_uart_ports[UART_SCC1].port.uartclk = uart_clock();
259 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC1;
260#endif
261
262#ifdef CONFIG_SERIAL_CPM_SCC2
263 cpm_uart_ports[UART_SCC2].sccp = &cpmp->cp_scc[1];
264 cpm_uart_ports[UART_SCC2].sccup =
265 (scc_uart_t *) & cpmp->cp_dparam[PROFF_SCC2];
266 cpm_uart_ports[UART_SCC2].port.mapbase =
267 (unsigned long)&cpmp->cp_scc[1];
268 cpm_uart_ports[UART_SCC2].sccp->scc_sccm &=
269 ~(UART_SCCM_TX | UART_SCCM_RX);
270 cpm_uart_ports[UART_SCC2].sccp->scc_gsmrl &=
271 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
272 cpm_uart_ports[UART_SCC2].port.uartclk = uart_clock();
273 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC2;
274#endif
275
276#ifdef CONFIG_SERIAL_CPM_SCC3
277 cpm_uart_ports[UART_SCC3].sccp = &cpmp->cp_scc[2];
278 cpm_uart_ports[UART_SCC3].sccup =
279 (scc_uart_t *) & cpmp->cp_dparam[PROFF_SCC3];
280 cpm_uart_ports[UART_SCC3].port.mapbase =
281 (unsigned long)&cpmp->cp_scc[2];
282 cpm_uart_ports[UART_SCC3].sccp->scc_sccm &=
283 ~(UART_SCCM_TX | UART_SCCM_RX);
284 cpm_uart_ports[UART_SCC3].sccp->scc_gsmrl &=
285 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
286 cpm_uart_ports[UART_SCC3].port.uartclk = uart_clock();
287 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC3;
288#endif
289
290#ifdef CONFIG_SERIAL_CPM_SCC4
291 cpm_uart_ports[UART_SCC4].sccp = &cpmp->cp_scc[3];
292 cpm_uart_ports[UART_SCC4].sccup =
293 (scc_uart_t *) & cpmp->cp_dparam[PROFF_SCC4];
294 cpm_uart_ports[UART_SCC4].port.mapbase =
295 (unsigned long)&cpmp->cp_scc[3];
296 cpm_uart_ports[UART_SCC4].sccp->scc_sccm &=
297 ~(UART_SCCM_TX | UART_SCCM_RX);
298 cpm_uart_ports[UART_SCC4].sccp->scc_gsmrl &=
299 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
300 cpm_uart_ports[UART_SCC4].port.uartclk = uart_clock();
301 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC4;
302#endif
303 return 0;
304}
305#endif
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/serial/cpm_uart/cpm_uart_cpm1.h
index ddf46d3c964b..10eecd6af6d4 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.h
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.h
@@ -2,7 +2,7 @@
2 * linux/drivers/serial/cpm_uart/cpm_uart_cpm1.h 2 * linux/drivers/serial/cpm_uart/cpm_uart_cpm1.h
3 * 3 *
4 * Driver for CPM (SCC/SMC) serial ports 4 * Driver for CPM (SCC/SMC) serial ports
5 * 5 *
6 * definitions for cpm1 6 * definitions for cpm1
7 * 7 *
8 */ 8 */
@@ -12,16 +12,6 @@
12 12
13#include <asm/cpm1.h> 13#include <asm/cpm1.h>
14 14
15/* defines for IRQs */
16#ifndef CONFIG_PPC_CPM_NEW_BINDING
17#define SMC1_IRQ (CPM_IRQ_OFFSET + CPMVEC_SMC1)
18#define SMC2_IRQ (CPM_IRQ_OFFSET + CPMVEC_SMC2)
19#define SCC1_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC1)
20#define SCC2_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC2)
21#define SCC3_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC3)
22#define SCC4_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC4)
23#endif
24
25static inline void cpm_set_brg(int brg, int baud) 15static inline void cpm_set_brg(int brg, int baud)
26{ 16{
27 cpm_setbrg(brg, baud); 17 cpm_setbrg(brg, baud);
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
index bb862e2f54cf..b8db4d3eed36 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
@@ -5,11 +5,11 @@
5 * 5 *
6 * Maintainer: Kumar Gala (galak@kernel.crashing.org) (CPM2) 6 * Maintainer: Kumar Gala (galak@kernel.crashing.org) (CPM2)
7 * Pantelis Antoniou (panto@intracom.gr) (CPM1) 7 * Pantelis Antoniou (panto@intracom.gr) (CPM1)
8 * 8 *
9 * Copyright (C) 2004 Freescale Semiconductor, Inc. 9 * Copyright (C) 2004 Freescale Semiconductor, Inc.
10 * (C) 2004 Intracom, S.A. 10 * (C) 2004 Intracom, S.A.
11 * (C) 2006 MontaVista Software, Inc. 11 * (C) 2006 MontaVista Software, Inc.
12 * Vitaly Bordug <vbordug@ru.mvista.com> 12 * Vitaly Bordug <vbordug@ru.mvista.com>
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
@@ -41,9 +41,7 @@
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/irq.h> 42#include <asm/irq.h>
43#include <asm/fs_pd.h> 43#include <asm/fs_pd.h>
44#ifdef CONFIG_PPC_CPM_NEW_BINDING
45#include <asm/prom.h> 44#include <asm/prom.h>
46#endif
47 45
48#include <linux/serial_core.h> 46#include <linux/serial_core.h>
49#include <linux/kernel.h> 47#include <linux/kernel.h>
@@ -52,7 +50,6 @@
52 50
53/**************************************************************/ 51/**************************************************************/
54 52
55#ifdef CONFIG_PPC_CPM_NEW_BINDING
56void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd) 53void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd)
57{ 54{
58 cpm_command(port->command, cmd); 55 cpm_command(port->command, cmd);
@@ -106,174 +103,8 @@ void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram)
106 iounmap(pram); 103 iounmap(pram);
107} 104}
108 105
109#else
110void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd)
111{
112 ulong val;
113 int line = port - cpm_uart_ports;
114 volatile cpm_cpm2_t *cp = cpm2_map(im_cpm);
115
116
117 switch (line) {
118 case UART_SMC1:
119 val = mk_cr_cmd(CPM_CR_SMC1_PAGE, CPM_CR_SMC1_SBLOCK, 0,
120 cmd) | CPM_CR_FLG;
121 break;
122 case UART_SMC2:
123 val = mk_cr_cmd(CPM_CR_SMC2_PAGE, CPM_CR_SMC2_SBLOCK, 0,
124 cmd) | CPM_CR_FLG;
125 break;
126 case UART_SCC1:
127 val = mk_cr_cmd(CPM_CR_SCC1_PAGE, CPM_CR_SCC1_SBLOCK, 0,
128 cmd) | CPM_CR_FLG;
129 break;
130 case UART_SCC2:
131 val = mk_cr_cmd(CPM_CR_SCC2_PAGE, CPM_CR_SCC2_SBLOCK, 0,
132 cmd) | CPM_CR_FLG;
133 break;
134 case UART_SCC3:
135 val = mk_cr_cmd(CPM_CR_SCC3_PAGE, CPM_CR_SCC3_SBLOCK, 0,
136 cmd) | CPM_CR_FLG;
137 break;
138 case UART_SCC4:
139 val = mk_cr_cmd(CPM_CR_SCC4_PAGE, CPM_CR_SCC4_SBLOCK, 0,
140 cmd) | CPM_CR_FLG;
141 break;
142 default:
143 return;
144
145 }
146 cp->cp_cpcr = val;
147 while (cp->cp_cpcr & CPM_CR_FLG) ;
148
149 cpm2_unmap(cp);
150}
151
152void smc1_lineif(struct uart_cpm_port *pinfo)
153{
154 volatile iop_cpm2_t *io = cpm2_map(im_ioport);
155 volatile cpmux_t *cpmux = cpm2_map(im_cpmux);
156
157 /* SMC1 is only on port D */
158 io->iop_ppard |= 0x00c00000;
159 io->iop_pdird |= 0x00400000;
160 io->iop_pdird &= ~0x00800000;
161 io->iop_psord &= ~0x00c00000;
162
163 /* Wire BRG1 to SMC1 */
164 cpmux->cmx_smr &= 0x0f;
165 pinfo->brg = 1;
166
167 cpm2_unmap(cpmux);
168 cpm2_unmap(io);
169}
170
171void smc2_lineif(struct uart_cpm_port *pinfo)
172{
173 volatile iop_cpm2_t *io = cpm2_map(im_ioport);
174 volatile cpmux_t *cpmux = cpm2_map(im_cpmux);
175
176 /* SMC2 is only on port A */
177 io->iop_ppara |= 0x00c00000;
178 io->iop_pdira |= 0x00400000;
179 io->iop_pdira &= ~0x00800000;
180 io->iop_psora &= ~0x00c00000;
181
182 /* Wire BRG2 to SMC2 */
183 cpmux->cmx_smr &= 0xf0;
184 pinfo->brg = 2;
185
186 cpm2_unmap(cpmux);
187 cpm2_unmap(io);
188}
189
190void scc1_lineif(struct uart_cpm_port *pinfo)
191{
192 volatile iop_cpm2_t *io = cpm2_map(im_ioport);
193 volatile cpmux_t *cpmux = cpm2_map(im_cpmux);
194
195 /* Use Port D for SCC1 instead of other functions. */
196 io->iop_ppard |= 0x00000003;
197 io->iop_psord &= ~0x00000001; /* Rx */
198 io->iop_psord |= 0x00000002; /* Tx */
199 io->iop_pdird &= ~0x00000001; /* Rx */
200 io->iop_pdird |= 0x00000002; /* Tx */
201
202 /* Wire BRG1 to SCC1 */
203 cpmux->cmx_scr &= 0x00ffffff;
204 cpmux->cmx_scr |= 0x00000000;
205 pinfo->brg = 1;
206
207 cpm2_unmap(cpmux);
208 cpm2_unmap(io);
209}
210
211void scc2_lineif(struct uart_cpm_port *pinfo)
212{
213 /*
214 * STx GP3 uses the SCC2 secondary option pin assignment
215 * which this driver doesn't account for in the static
216 * pin assignments. This kind of board specific info
217 * really has to get out of the driver so boards can
218 * be supported in a sane fashion.
219 */
220 volatile cpmux_t *cpmux = cpm2_map(im_cpmux);
221#ifndef CONFIG_STX_GP3
222 volatile iop_cpm2_t *io = cpm2_map(im_ioport);
223
224 io->iop_pparb |= 0x008b0000;
225 io->iop_pdirb |= 0x00880000;
226 io->iop_psorb |= 0x00880000;
227 io->iop_pdirb &= ~0x00030000;
228 io->iop_psorb &= ~0x00030000;
229#endif
230 cpmux->cmx_scr &= 0xff00ffff;
231 cpmux->cmx_scr |= 0x00090000;
232 pinfo->brg = 2;
233
234 cpm2_unmap(cpmux);
235 cpm2_unmap(io);
236}
237
238void scc3_lineif(struct uart_cpm_port *pinfo)
239{
240 volatile iop_cpm2_t *io = cpm2_map(im_ioport);
241 volatile cpmux_t *cpmux = cpm2_map(im_cpmux);
242
243 io->iop_pparb |= 0x008b0000;
244 io->iop_pdirb |= 0x00880000;
245 io->iop_psorb |= 0x00880000;
246 io->iop_pdirb &= ~0x00030000;
247 io->iop_psorb &= ~0x00030000;
248 cpmux->cmx_scr &= 0xffff00ff;
249 cpmux->cmx_scr |= 0x00001200;
250 pinfo->brg = 3;
251
252 cpm2_unmap(cpmux);
253 cpm2_unmap(io);
254}
255
256void scc4_lineif(struct uart_cpm_port *pinfo)
257{
258 volatile iop_cpm2_t *io = cpm2_map(im_ioport);
259 volatile cpmux_t *cpmux = cpm2_map(im_cpmux);
260
261 io->iop_ppard |= 0x00000600;
262 io->iop_psord &= ~0x00000600; /* Tx/Rx */
263 io->iop_pdird &= ~0x00000200; /* Rx */
264 io->iop_pdird |= 0x00000400; /* Tx */
265
266 cpmux->cmx_scr &= 0xffffff00;
267 cpmux->cmx_scr |= 0x0000001b;
268 pinfo->brg = 4;
269
270 cpm2_unmap(cpmux);
271 cpm2_unmap(io);
272}
273#endif
274
275/* 106/*
276 * Allocate DP-Ram and memory buffers. We need to allocate a transmit and 107 * Allocate DP-Ram and memory buffers. We need to allocate a transmit and
277 * receive buffer descriptors from dual port ram, and a character 108 * receive buffer descriptors from dual port ram, and a character
278 * buffer area from host mem. If we are allocating for the console we need 109 * buffer area from host mem. If we are allocating for the console we need
279 * to do it from bootmem 110 * to do it from bootmem
@@ -340,111 +171,3 @@ void cpm_uart_freebuf(struct uart_cpm_port *pinfo)
340 171
341 cpm_dpfree(pinfo->dp_addr); 172 cpm_dpfree(pinfo->dp_addr);
342} 173}
343
344#ifndef CONFIG_PPC_CPM_NEW_BINDING
345/* Setup any dynamic params in the uart desc */
346int cpm_uart_init_portdesc(void)
347{
348#if defined(CONFIG_SERIAL_CPM_SMC1) || defined(CONFIG_SERIAL_CPM_SMC2)
349 u16 *addr;
350#endif
351 pr_debug("CPM uart[-]:init portdesc\n");
352
353 cpm_uart_nr = 0;
354#ifdef CONFIG_SERIAL_CPM_SMC1
355 cpm_uart_ports[UART_SMC1].smcp = (smc_t *) cpm2_map(im_smc[0]);
356 cpm_uart_ports[UART_SMC1].port.mapbase =
357 (unsigned long)cpm_uart_ports[UART_SMC1].smcp;
358
359 cpm_uart_ports[UART_SMC1].smcup =
360 (smc_uart_t *) cpm2_map_size(im_dprambase[PROFF_SMC1], PROFF_SMC_SIZE);
361 addr = (u16 *)cpm2_map_size(im_dprambase[PROFF_SMC1_BASE], 2);
362 *addr = PROFF_SMC1;
363 cpm2_unmap(addr);
364
365 cpm_uart_ports[UART_SMC1].smcp->smc_smcm |= (SMCM_RX | SMCM_TX);
366 cpm_uart_ports[UART_SMC1].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
367 cpm_uart_ports[UART_SMC1].port.uartclk = uart_clock();
368 cpm_uart_port_map[cpm_uart_nr++] = UART_SMC1;
369#endif
370
371#ifdef CONFIG_SERIAL_CPM_SMC2
372 cpm_uart_ports[UART_SMC2].smcp = (smc_t *) cpm2_map(im_smc[1]);
373 cpm_uart_ports[UART_SMC2].port.mapbase =
374 (unsigned long)cpm_uart_ports[UART_SMC2].smcp;
375
376 cpm_uart_ports[UART_SMC2].smcup =
377 (smc_uart_t *) cpm2_map_size(im_dprambase[PROFF_SMC2], PROFF_SMC_SIZE);
378 addr = (u16 *)cpm2_map_size(im_dprambase[PROFF_SMC2_BASE], 2);
379 *addr = PROFF_SMC2;
380 cpm2_unmap(addr);
381
382 cpm_uart_ports[UART_SMC2].smcp->smc_smcm |= (SMCM_RX | SMCM_TX);
383 cpm_uart_ports[UART_SMC2].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
384 cpm_uart_ports[UART_SMC2].port.uartclk = uart_clock();
385 cpm_uart_port_map[cpm_uart_nr++] = UART_SMC2;
386#endif
387
388#ifdef CONFIG_SERIAL_CPM_SCC1
389 cpm_uart_ports[UART_SCC1].sccp = (scc_t *) cpm2_map(im_scc[0]);
390 cpm_uart_ports[UART_SCC1].port.mapbase =
391 (unsigned long)cpm_uart_ports[UART_SCC1].sccp;
392 cpm_uart_ports[UART_SCC1].sccup =
393 (scc_uart_t *) cpm2_map_size(im_dprambase[PROFF_SCC1], PROFF_SCC_SIZE);
394
395 cpm_uart_ports[UART_SCC1].sccp->scc_sccm &=
396 ~(UART_SCCM_TX | UART_SCCM_RX);
397 cpm_uart_ports[UART_SCC1].sccp->scc_gsmrl &=
398 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
399 cpm_uart_ports[UART_SCC1].port.uartclk = uart_clock();
400 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC1;
401#endif
402
403#ifdef CONFIG_SERIAL_CPM_SCC2
404 cpm_uart_ports[UART_SCC2].sccp = (scc_t *) cpm2_map(im_scc[1]);
405 cpm_uart_ports[UART_SCC2].port.mapbase =
406 (unsigned long)cpm_uart_ports[UART_SCC2].sccp;
407 cpm_uart_ports[UART_SCC2].sccup =
408 (scc_uart_t *) cpm2_map_size(im_dprambase[PROFF_SCC2], PROFF_SCC_SIZE);
409
410 cpm_uart_ports[UART_SCC2].sccp->scc_sccm &=
411 ~(UART_SCCM_TX | UART_SCCM_RX);
412 cpm_uart_ports[UART_SCC2].sccp->scc_gsmrl &=
413 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
414 cpm_uart_ports[UART_SCC2].port.uartclk = uart_clock();
415 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC2;
416#endif
417
418#ifdef CONFIG_SERIAL_CPM_SCC3
419 cpm_uart_ports[UART_SCC3].sccp = (scc_t *) cpm2_map(im_scc[2]);
420 cpm_uart_ports[UART_SCC3].port.mapbase =
421 (unsigned long)cpm_uart_ports[UART_SCC3].sccp;
422 cpm_uart_ports[UART_SCC3].sccup =
423 (scc_uart_t *) cpm2_map_size(im_dprambase[PROFF_SCC3], PROFF_SCC_SIZE);
424
425 cpm_uart_ports[UART_SCC3].sccp->scc_sccm &=
426 ~(UART_SCCM_TX | UART_SCCM_RX);
427 cpm_uart_ports[UART_SCC3].sccp->scc_gsmrl &=
428 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
429 cpm_uart_ports[UART_SCC3].port.uartclk = uart_clock();
430 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC3;
431#endif
432
433#ifdef CONFIG_SERIAL_CPM_SCC4
434 cpm_uart_ports[UART_SCC4].sccp = (scc_t *) cpm2_map(im_scc[3]);
435 cpm_uart_ports[UART_SCC4].port.mapbase =
436 (unsigned long)cpm_uart_ports[UART_SCC4].sccp;
437 cpm_uart_ports[UART_SCC4].sccup =
438 (scc_uart_t *) cpm2_map_size(im_dprambase[PROFF_SCC4], PROFF_SCC_SIZE);
439
440 cpm_uart_ports[UART_SCC4].sccp->scc_sccm &=
441 ~(UART_SCCM_TX | UART_SCCM_RX);
442 cpm_uart_ports[UART_SCC4].sccp->scc_gsmrl &=
443 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
444 cpm_uart_ports[UART_SCC4].port.uartclk = uart_clock();
445 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC4;
446#endif
447
448 return 0;
449}
450#endif
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.h b/drivers/serial/cpm_uart/cpm_uart_cpm2.h
index 40006a7dce46..7194c63dcf5f 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.h
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.h
@@ -2,7 +2,7 @@
2 * linux/drivers/serial/cpm_uart/cpm_uart_cpm2.h 2 * linux/drivers/serial/cpm_uart/cpm_uart_cpm2.h
3 * 3 *
4 * Driver for CPM (SCC/SMC) serial ports 4 * Driver for CPM (SCC/SMC) serial ports
5 * 5 *
6 * definitions for cpm2 6 * definitions for cpm2
7 * 7 *
8 */ 8 */
@@ -12,16 +12,6 @@
12 12
13#include <asm/cpm2.h> 13#include <asm/cpm2.h>
14 14
15/* defines for IRQs */
16#ifndef CONFIG_PPC_CPM_NEW_BINDING
17#define SMC1_IRQ SIU_INT_SMC1
18#define SMC2_IRQ SIU_INT_SMC2
19#define SCC1_IRQ SIU_INT_SCC1
20#define SCC2_IRQ SIU_INT_SCC2
21#define SCC3_IRQ SIU_INT_SCC3
22#define SCC4_IRQ SIU_INT_SCC4
23#endif
24
25static inline void cpm_set_brg(int brg, int baud) 15static inline void cpm_set_brg(int brg, int baud)
26{ 16{
27 cpm_setbrg(brg, baud); 17 cpm_setbrg(brg, baud);
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 25029c7570b6..8fa0ff561e9f 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -13,8 +13,8 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/serial_core.h> 14#include <linux/serial_core.h>
15#include <linux/serial_8250.h> 15#include <linux/serial_8250.h>
16#include <linux/of_platform.h>
16 17
17#include <asm/of_platform.h>
18#include <asm/prom.h> 18#include <asm/prom.h>
19 19
20struct of_serial_info { 20struct of_serial_info {
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 681d62325d3d..604e5f0a2d95 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -17,7 +17,7 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18 18
19#if defined(CONFIG_PPC_MERGE) 19#if defined(CONFIG_PPC_MERGE)
20#include <asm/of_platform.h> 20#include <linux/of_platform.h>
21#else 21#else
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#endif 23#endif
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index cbe71a5338d0..03b3670130a0 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -31,11 +31,11 @@
31#include <linux/fb.h> 31#include <linux/fb.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/nvram.h> 33#include <linux/nvram.h>
34#include <linux/of_device.h>
35#include <linux/of_platform.h>
34#include <asm/io.h> 36#include <asm/io.h>
35#include <asm/prom.h> 37#include <asm/prom.h>
36#include <asm/pgtable.h> 38#include <asm/pgtable.h>
37#include <asm/of_device.h>
38#include <asm/of_platform.h>
39 39
40#include "macmodes.h" 40#include "macmodes.h"
41#include "platinumfb.h" 41#include "platinumfb.h"
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 0fd5820d5c61..df52cb355f7d 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -94,21 +94,31 @@ static const u8 ds2482_chan_rd[8] =
94#define DS2482_REG_STS_1WB 0x01 94#define DS2482_REG_STS_1WB 0x01
95 95
96 96
97static int ds2482_attach_adapter(struct i2c_adapter *adapter); 97static int ds2482_probe(struct i2c_client *client,
98static int ds2482_detect(struct i2c_adapter *adapter, int address, int kind); 98 const struct i2c_device_id *id);
99static int ds2482_detach_client(struct i2c_client *client); 99static int ds2482_detect(struct i2c_client *client, int kind,
100 struct i2c_board_info *info);
101static int ds2482_remove(struct i2c_client *client);
100 102
101 103
102/** 104/**
103 * Driver data (common to all clients) 105 * Driver data (common to all clients)
104 */ 106 */
107static const struct i2c_device_id ds2482_id[] = {
108 { "ds2482", 0 },
109 { }
110};
111
105static struct i2c_driver ds2482_driver = { 112static struct i2c_driver ds2482_driver = {
106 .driver = { 113 .driver = {
107 .owner = THIS_MODULE, 114 .owner = THIS_MODULE,
108 .name = "ds2482", 115 .name = "ds2482",
109 }, 116 },
110 .attach_adapter = ds2482_attach_adapter, 117 .probe = ds2482_probe,
111 .detach_client = ds2482_detach_client, 118 .remove = ds2482_remove,
119 .id_table = ds2482_id,
120 .detect = ds2482_detect,
121 .address_data = &addr_data,
112}; 122};
113 123
114/* 124/*
@@ -124,7 +134,7 @@ struct ds2482_w1_chan {
124}; 134};
125 135
126struct ds2482_data { 136struct ds2482_data {
127 struct i2c_client client; 137 struct i2c_client *client;
128 struct mutex access_lock; 138 struct mutex access_lock;
129 139
130 /* 1-wire interface(s) */ 140 /* 1-wire interface(s) */
@@ -147,7 +157,7 @@ struct ds2482_data {
147static inline int ds2482_select_register(struct ds2482_data *pdev, u8 read_ptr) 157static inline int ds2482_select_register(struct ds2482_data *pdev, u8 read_ptr)
148{ 158{
149 if (pdev->read_prt != read_ptr) { 159 if (pdev->read_prt != read_ptr) {
150 if (i2c_smbus_write_byte_data(&pdev->client, 160 if (i2c_smbus_write_byte_data(pdev->client,
151 DS2482_CMD_SET_READ_PTR, 161 DS2482_CMD_SET_READ_PTR,
152 read_ptr) < 0) 162 read_ptr) < 0)
153 return -1; 163 return -1;
@@ -167,7 +177,7 @@ static inline int ds2482_select_register(struct ds2482_data *pdev, u8 read_ptr)
167 */ 177 */
168static inline int ds2482_send_cmd(struct ds2482_data *pdev, u8 cmd) 178static inline int ds2482_send_cmd(struct ds2482_data *pdev, u8 cmd)
169{ 179{
170 if (i2c_smbus_write_byte(&pdev->client, cmd) < 0) 180 if (i2c_smbus_write_byte(pdev->client, cmd) < 0)
171 return -1; 181 return -1;
172 182
173 pdev->read_prt = DS2482_PTR_CODE_STATUS; 183 pdev->read_prt = DS2482_PTR_CODE_STATUS;
@@ -187,7 +197,7 @@ static inline int ds2482_send_cmd(struct ds2482_data *pdev, u8 cmd)
187static inline int ds2482_send_cmd_data(struct ds2482_data *pdev, 197static inline int ds2482_send_cmd_data(struct ds2482_data *pdev,
188 u8 cmd, u8 byte) 198 u8 cmd, u8 byte)
189{ 199{
190 if (i2c_smbus_write_byte_data(&pdev->client, cmd, byte) < 0) 200 if (i2c_smbus_write_byte_data(pdev->client, cmd, byte) < 0)
191 return -1; 201 return -1;
192 202
193 /* all cmds leave in STATUS, except CONFIG */ 203 /* all cmds leave in STATUS, except CONFIG */
@@ -216,7 +226,7 @@ static int ds2482_wait_1wire_idle(struct ds2482_data *pdev)
216 226
217 if (!ds2482_select_register(pdev, DS2482_PTR_CODE_STATUS)) { 227 if (!ds2482_select_register(pdev, DS2482_PTR_CODE_STATUS)) {
218 do { 228 do {
219 temp = i2c_smbus_read_byte(&pdev->client); 229 temp = i2c_smbus_read_byte(pdev->client);
220 } while ((temp >= 0) && (temp & DS2482_REG_STS_1WB) && 230 } while ((temp >= 0) && (temp & DS2482_REG_STS_1WB) &&
221 (++retries < DS2482_WAIT_IDLE_TIMEOUT)); 231 (++retries < DS2482_WAIT_IDLE_TIMEOUT));
222 } 232 }
@@ -238,13 +248,13 @@ static int ds2482_wait_1wire_idle(struct ds2482_data *pdev)
238 */ 248 */
239static int ds2482_set_channel(struct ds2482_data *pdev, u8 channel) 249static int ds2482_set_channel(struct ds2482_data *pdev, u8 channel)
240{ 250{
241 if (i2c_smbus_write_byte_data(&pdev->client, DS2482_CMD_CHANNEL_SELECT, 251 if (i2c_smbus_write_byte_data(pdev->client, DS2482_CMD_CHANNEL_SELECT,
242 ds2482_chan_wr[channel]) < 0) 252 ds2482_chan_wr[channel]) < 0)
243 return -1; 253 return -1;
244 254
245 pdev->read_prt = DS2482_PTR_CODE_CHANNEL; 255 pdev->read_prt = DS2482_PTR_CODE_CHANNEL;
246 pdev->channel = -1; 256 pdev->channel = -1;
247 if (i2c_smbus_read_byte(&pdev->client) == ds2482_chan_rd[channel]) { 257 if (i2c_smbus_read_byte(pdev->client) == ds2482_chan_rd[channel]) {
248 pdev->channel = channel; 258 pdev->channel = channel;
249 return 0; 259 return 0;
250 } 260 }
@@ -368,7 +378,7 @@ static u8 ds2482_w1_read_byte(void *data)
368 ds2482_select_register(pdev, DS2482_PTR_CODE_DATA); 378 ds2482_select_register(pdev, DS2482_PTR_CODE_DATA);
369 379
370 /* Read the data byte */ 380 /* Read the data byte */
371 result = i2c_smbus_read_byte(&pdev->client); 381 result = i2c_smbus_read_byte(pdev->client);
372 382
373 mutex_unlock(&pdev->access_lock); 383 mutex_unlock(&pdev->access_lock);
374 384
@@ -415,47 +425,38 @@ static u8 ds2482_w1_reset_bus(void *data)
415} 425}
416 426
417 427
418/** 428static int ds2482_detect(struct i2c_client *client, int kind,
419 * Called to see if the device exists on an i2c bus. 429 struct i2c_board_info *info)
420 */
421static int ds2482_attach_adapter(struct i2c_adapter *adapter)
422{ 430{
423 return i2c_probe(adapter, &addr_data, ds2482_detect); 431 if (!i2c_check_functionality(client->adapter,
424} 432 I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
433 I2C_FUNC_SMBUS_BYTE))
434 return -ENODEV;
425 435
436 strlcpy(info->type, "ds2482", I2C_NAME_SIZE);
426 437
427/* 438 return 0;
428 * The following function does more than just detection. If detection 439}
429 * succeeds, it also registers the new chip. 440
430 */ 441static int ds2482_probe(struct i2c_client *client,
431static int ds2482_detect(struct i2c_adapter *adapter, int address, int kind) 442 const struct i2c_device_id *id)
432{ 443{
433 struct ds2482_data *data; 444 struct ds2482_data *data;
434 struct i2c_client *new_client; 445 int err = -ENODEV;
435 int err = 0;
436 int temp1; 446 int temp1;
437 int idx; 447 int idx;
438 448
439 if (!i2c_check_functionality(adapter,
440 I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
441 I2C_FUNC_SMBUS_BYTE))
442 goto exit;
443
444 if (!(data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL))) { 449 if (!(data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL))) {
445 err = -ENOMEM; 450 err = -ENOMEM;
446 goto exit; 451 goto exit;
447 } 452 }
448 453
449 new_client = &data->client; 454 data->client = client;
450 i2c_set_clientdata(new_client, data); 455 i2c_set_clientdata(client, data);
451 new_client->addr = address;
452 new_client->driver = &ds2482_driver;
453 new_client->adapter = adapter;
454 456
455 /* Reset the device (sets the read_ptr to status) */ 457 /* Reset the device (sets the read_ptr to status) */
456 if (ds2482_send_cmd(data, DS2482_CMD_RESET) < 0) { 458 if (ds2482_send_cmd(data, DS2482_CMD_RESET) < 0) {
457 dev_dbg(&adapter->dev, "DS2482 reset failed at 0x%02x.\n", 459 dev_warn(&client->dev, "DS2482 reset failed.\n");
458 address);
459 goto exit_free; 460 goto exit_free;
460 } 461 }
461 462
@@ -463,10 +464,10 @@ static int ds2482_detect(struct i2c_adapter *adapter, int address, int kind)
463 ndelay(525); 464 ndelay(525);
464 465
465 /* Read the status byte - only reset bit and line should be set */ 466 /* Read the status byte - only reset bit and line should be set */
466 temp1 = i2c_smbus_read_byte(new_client); 467 temp1 = i2c_smbus_read_byte(client);
467 if (temp1 != (DS2482_REG_STS_LL | DS2482_REG_STS_RST)) { 468 if (temp1 != (DS2482_REG_STS_LL | DS2482_REG_STS_RST)) {
468 dev_dbg(&adapter->dev, "DS2482 (0x%02x) reset status " 469 dev_warn(&client->dev, "DS2482 reset status "
469 "0x%02X - not a DS2482\n", address, temp1); 470 "0x%02X - not a DS2482\n", temp1);
470 goto exit_free; 471 goto exit_free;
471 } 472 }
472 473
@@ -478,16 +479,8 @@ static int ds2482_detect(struct i2c_adapter *adapter, int address, int kind)
478 /* Set all config items to 0 (off) */ 479 /* Set all config items to 0 (off) */
479 ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG, 0xF0); 480 ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG, 0xF0);
480 481
481 /* We can fill in the remaining client fields */
482 snprintf(new_client->name, sizeof(new_client->name), "ds2482-%d00",
483 data->w1_count);
484
485 mutex_init(&data->access_lock); 482 mutex_init(&data->access_lock);
486 483
487 /* Tell the I2C layer a new client has arrived */
488 if ((err = i2c_attach_client(new_client)))
489 goto exit_free;
490
491 /* Register 1-wire interface(s) */ 484 /* Register 1-wire interface(s) */
492 for (idx = 0; idx < data->w1_count; idx++) { 485 for (idx = 0; idx < data->w1_count; idx++) {
493 data->w1_ch[idx].pdev = data; 486 data->w1_ch[idx].pdev = data;
@@ -511,8 +504,6 @@ static int ds2482_detect(struct i2c_adapter *adapter, int address, int kind)
511 return 0; 504 return 0;
512 505
513exit_w1_remove: 506exit_w1_remove:
514 i2c_detach_client(new_client);
515
516 for (idx = 0; idx < data->w1_count; idx++) { 507 for (idx = 0; idx < data->w1_count; idx++) {
517 if (data->w1_ch[idx].pdev != NULL) 508 if (data->w1_ch[idx].pdev != NULL)
518 w1_remove_master_device(&data->w1_ch[idx].w1_bm); 509 w1_remove_master_device(&data->w1_ch[idx].w1_bm);
@@ -523,10 +514,10 @@ exit:
523 return err; 514 return err;
524} 515}
525 516
526static int ds2482_detach_client(struct i2c_client *client) 517static int ds2482_remove(struct i2c_client *client)
527{ 518{
528 struct ds2482_data *data = i2c_get_clientdata(client); 519 struct ds2482_data *data = i2c_get_clientdata(client);
529 int err, idx; 520 int idx;
530 521
531 /* Unregister the 1-wire bridge(s) */ 522 /* Unregister the 1-wire bridge(s) */
532 for (idx = 0; idx < data->w1_count; idx++) { 523 for (idx = 0; idx < data->w1_count; idx++) {
@@ -534,13 +525,6 @@ static int ds2482_detach_client(struct i2c_client *client)
534 w1_remove_master_device(&data->w1_ch[idx].w1_bm); 525 w1_remove_master_device(&data->w1_ch[idx].w1_bm);
535 } 526 }
536 527
537 /* Detach the i2c device */
538 if ((err = i2c_detach_client(client))) {
539 dev_err(&client->dev,
540 "Deregistration failed, client not detached.\n");
541 return err;
542 }
543
544 /* Free the memory */ 528 /* Free the memory */
545 kfree(data); 529 kfree(data);
546 return 0; 530 return 0;
diff --git a/drivers/watchdog/mpc5200_wdt.c b/drivers/watchdog/mpc5200_wdt.c
index 80a91d4cea11..77c1c2ae2cc2 100644
--- a/drivers/watchdog/mpc5200_wdt.c
+++ b/drivers/watchdog/mpc5200_wdt.c
@@ -4,7 +4,7 @@
4#include <linux/watchdog.h> 4#include <linux/watchdog.h>
5#include <linux/io.h> 5#include <linux/io.h>
6#include <linux/spinlock.h> 6#include <linux/spinlock.h>
7#include <asm/of_platform.h> 7#include <linux/of_platform.h>
8#include <asm/uaccess.h> 8#include <asm/uaccess.h>
9#include <asm/mpc52xx.h> 9#include <asm/mpc52xx.h>
10 10